#include <unistd.h>
#include <limits.h>
#include <assert.h>
-#include <linux/memfd.h>
#include <sys/mman.h>
#include "anv_private.h"
-#include "util/hash_table.h"
-#include "util/simple_mtx.h"
+#include "common/gen_aux_map.h"
+#include "util/anon_file.h"
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({ \
#define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
#endif
+#ifndef MAP_POPULATE
+#define MAP_POPULATE 0
+#endif
+
/* Design goals:
*
* - Lock free (except when resizing underlying bos)
struct anv_mmap_cleanup {
void *map;
size_t size;
- uint32_t gem_handle;
};
-#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})
-
-#ifndef HAVE_MEMFD_CREATE
-static inline int
-memfd_create(const char *name, unsigned int flags)
-{
- return syscall(SYS_memfd_create, name, flags);
-}
-#endif
-
static inline uint32_t
ilog2_round_up(uint32_t value)
{
table->device = device;
- table->fd = memfd_create("state table", MFD_CLOEXEC);
- if (table->fd == -1)
- return vk_error(VK_ERROR_INITIALIZATION_FAILED);
-
/* Just make it 2GB up-front. The Linux kernel won't actually back it
* with pages until we either map and fault on one of them or we use
* userptr and send a chunk of it off to the GPU.
*/
- if (ftruncate(table->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
+ table->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "state table");
+ if (table->fd == -1) {
result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
map = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, table->fd, 0);
if (map == MAP_FAILED) {
- return vk_errorf(table->device->instance, table->device,
+ return vk_errorf(table->device, table->device,
VK_ERROR_OUT_OF_HOST_MEMORY, "mmap failed: %m");
}
return NULL;
}
-/* All pointers in the ptr_free_list are assumed to be page-aligned. This
- * means that the bottom 12 bits should all be zero.
- */
-#define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff)
-#define PFL_PTR(x) ((void *)((uintptr_t)(x) & ~(uintptr_t)0xfff))
-#define PFL_PACK(ptr, count) ({ \
- (void *)(((uintptr_t)(ptr) & ~(uintptr_t)0xfff) | ((count) & 0xfff)); \
-})
-
-static bool
-anv_ptr_free_list_pop(void **list, void **elem)
-{
- void *current = *list;
- while (PFL_PTR(current) != NULL) {
- void **next_ptr = PFL_PTR(current);
- void *new_ptr = VG_NOACCESS_READ(next_ptr);
- unsigned new_count = PFL_COUNT(current) + 1;
- void *new = PFL_PACK(new_ptr, new_count);
- void *old = __sync_val_compare_and_swap(list, current, new);
- if (old == current) {
- *elem = PFL_PTR(current);
- return true;
- }
- current = old;
- }
-
- return false;
-}
-
-static void
-anv_ptr_free_list_push(void **list, void *elem)
-{
- void *old, *current;
- void **next_ptr = elem;
-
- /* The pointer-based free list requires that the pointer be
- * page-aligned. This is because we use the bottom 12 bits of the
- * pointer to store a counter to solve the ABA concurrency problem.
- */
- assert(((uintptr_t)elem & 0xfff) == 0);
-
- old = *list;
- do {
- current = old;
- VG_NOACCESS_WRITE(next_ptr, PFL_PTR(current));
- unsigned new_count = PFL_COUNT(current) + 1;
- void *new = PFL_PACK(elem, new_count);
- old = __sync_val_compare_and_swap(list, current, new);
- } while (old != current);
-}
-
static VkResult
anv_block_pool_expand_range(struct anv_block_pool *pool,
uint32_t center_bo_offset, uint32_t size);
anv_block_pool_init(struct anv_block_pool *pool,
struct anv_device *device,
uint64_t start_address,
- uint32_t initial_size,
- uint64_t bo_flags)
+ uint32_t initial_size)
{
VkResult result;
pool->device = device;
- pool->bo_flags = bo_flags;
+ pool->use_softpin = device->physical->use_softpin;
pool->nbos = 0;
pool->size = 0;
pool->center_bo_offset = 0;
pool->start_address = gen_canonical_address(start_address);
pool->map = NULL;
- /* This pointer will always point to the first BO in the list */
- pool->bo = &pool->bos[0];
-
- anv_bo_init(pool->bo, 0, 0);
-
- if (!(pool->bo_flags & EXEC_OBJECT_PINNED)) {
- pool->fd = memfd_create("block pool", MFD_CLOEXEC);
- if (pool->fd == -1)
- return vk_error(VK_ERROR_INITIALIZATION_FAILED);
-
+ if (pool->use_softpin) {
+ pool->bo = NULL;
+ pool->fd = -1;
+ } else {
/* Just make it 2GB up-front. The Linux kernel won't actually back it
* with pages until we either map and fault on one of them or we use
* userptr and send a chunk of it off to the GPU.
*/
- if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
- goto fail_fd;
- }
- } else {
- pool->fd = -1;
+ pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
+ if (pool->fd == -1)
+ return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+
+ pool->wrapper_bo = (struct anv_bo) {
+ .refcount = 1,
+ .offset = -1,
+ .is_wrapper = true,
+ };
+ pool->bo = &pool->wrapper_bo;
}
if (!u_vector_init(&pool->mmap_cleanups,
fail_mmap_cleanups:
u_vector_finish(&pool->mmap_cleanups);
fail_fd:
- if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
+ if (pool->fd >= 0)
close(pool->fd);
return result;
void
anv_block_pool_finish(struct anv_block_pool *pool)
{
- struct anv_mmap_cleanup *cleanup;
- const bool use_softpin = !!(pool->bo_flags & EXEC_OBJECT_PINNED);
-
- u_vector_foreach(cleanup, &pool->mmap_cleanups) {
- if (use_softpin)
- anv_gem_munmap(cleanup->map, cleanup->size);
- else
- munmap(cleanup->map, cleanup->size);
-
- if (cleanup->gem_handle)
- anv_gem_close(pool->device, cleanup->gem_handle);
+ anv_block_pool_foreach_bo(bo, pool) {
+ if (bo->map)
+ anv_gem_munmap(pool->device, bo->map, bo->size);
+ anv_gem_close(pool->device, bo->gem_handle);
}
+ struct anv_mmap_cleanup *cleanup;
+ u_vector_foreach(cleanup, &pool->mmap_cleanups)
+ munmap(cleanup->map, cleanup->size);
u_vector_finish(&pool->mmap_cleanups);
- if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
+
+ if (pool->fd >= 0)
close(pool->fd);
}
anv_block_pool_expand_range(struct anv_block_pool *pool,
uint32_t center_bo_offset, uint32_t size)
{
- void *map;
- uint32_t gem_handle;
- struct anv_mmap_cleanup *cleanup;
- const bool use_softpin = !!(pool->bo_flags & EXEC_OBJECT_PINNED);
-
/* Assert that we only ever grow the pool */
assert(center_bo_offset >= pool->back_state.end);
assert(size - center_bo_offset >= pool->state.end);
/* Assert that we don't go outside the bounds of the memfd */
assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
- assert(use_softpin ||
+ assert(pool->use_softpin ||
size - center_bo_offset <=
BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);
- cleanup = u_vector_add(&pool->mmap_cleanups);
- if (!cleanup)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
- *cleanup = ANV_MMAP_CLEANUP_INIT;
-
- uint32_t newbo_size = size - pool->size;
- if (use_softpin) {
- gem_handle = anv_gem_create(pool->device, newbo_size);
- map = anv_gem_mmap(pool->device, gem_handle, 0, newbo_size, 0);
- if (map == MAP_FAILED)
- return vk_errorf(pool->device->instance, pool->device,
- VK_ERROR_MEMORY_MAP_FAILED, "gem mmap failed: %m");
- assert(center_bo_offset == 0);
- } else {
- /* Just leak the old map until we destroy the pool. We can't munmap it
- * without races or imposing locking on the block allocate fast path. On
- * the whole the leaked maps adds up to less than the size of the
- * current map. MAP_POPULATE seems like the right thing to do, but we
- * should try to get some numbers.
- */
- map = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, pool->fd,
- BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
- if (map == MAP_FAILED)
- return vk_errorf(pool->device->instance, pool->device,
- VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
-
- /* Now that we mapped the new memory, we can write the new
- * center_bo_offset back into pool and update pool->map. */
- pool->center_bo_offset = center_bo_offset;
- pool->map = map + center_bo_offset;
- gem_handle = anv_gem_userptr(pool->device, map, size);
- if (gem_handle == 0) {
- munmap(map, size);
- return vk_errorf(pool->device->instance, pool->device,
- VK_ERROR_TOO_MANY_OBJECTS, "userptr failed: %m");
- }
- }
-
- cleanup->map = map;
- cleanup->size = use_softpin ? newbo_size : size;
- cleanup->gem_handle = gem_handle;
-
- /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
- * I915_CACHING_NONE on non-LLC platforms. However, userptr objects are
- * always created as I915_CACHING_CACHED, which on non-LLC means
- * snooped.
- *
- * On platforms that support softpin, we are not going to use userptr
- * anymore, but we still want to rely on the snooped states. So make sure
- * everything is set to I915_CACHING_CACHED.
- */
- if (!pool->device->info.has_llc)
- anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_CACHED);
-
- /* For block pool BOs we have to be a bit careful about where we place them
+ /* For state pool BOs we have to be a bit careful about where we place them
* in the GTT. There are two documented workarounds for state base address
* placement : Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
* which state that those two base addresses do not support 48-bit
* BO to some particular location of our choosing, but that's significantly
* more work than just not setting a flag. So, we explicitly DO NOT set
* the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
- * hard work for us.
+ * hard work for us. When using softpin, we're in control and the fixed
+ * addresses we choose are fine for base addresses.
*/
- struct anv_bo *bo;
- uint32_t bo_size;
- uint64_t bo_offset;
+ enum anv_bo_alloc_flags bo_alloc_flags = ANV_BO_ALLOC_CAPTURE;
+ if (!pool->use_softpin)
+ bo_alloc_flags |= ANV_BO_ALLOC_32BIT_ADDRESS;
- assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS);
-
- if (use_softpin) {
- /* With softpin, we add a new BO to the pool, and set its offset to right
- * where the previous BO ends (the end of the pool).
- */
- bo = &pool->bos[pool->nbos++];
- bo_size = newbo_size;
- bo_offset = pool->start_address + pool->size;
+ if (pool->use_softpin) {
+ uint32_t new_bo_size = size - pool->size;
+ struct anv_bo *new_bo;
+ assert(center_bo_offset == 0);
+ VkResult result = anv_device_alloc_bo(pool->device, new_bo_size,
+ bo_alloc_flags |
+ ANV_BO_ALLOC_FIXED_ADDRESS |
+ ANV_BO_ALLOC_MAPPED |
+ ANV_BO_ALLOC_SNOOPED,
+ pool->start_address + pool->size,
+ &new_bo);
+ if (result != VK_SUCCESS)
+ return result;
+
+ pool->bos[pool->nbos++] = new_bo;
+
+ /* This pointer will always point to the first BO in the list */
+ pool->bo = pool->bos[0];
} else {
- /* Without softpin, we just need one BO, and we already have a pointer to
- * it. Simply "allocate" it from our array if we didn't do it before.
- * The offset doesn't matter since we are not pinning the BO anyway.
+ /* Just leak the old map until we destroy the pool. We can't munmap it
+ * without races or imposing locking on the block allocate fast path. On
+ * the whole, the leaked maps add up to less than the size of the
+ * current map. MAP_POPULATE seems like the right thing to do, but we
+ * should try to get some numbers.
*/
- if (pool->nbos == 0)
- pool->nbos++;
- bo = pool->bo;
- bo_size = size;
- bo_offset = 0;
- }
-
- anv_bo_init(bo, gem_handle, bo_size);
- bo->offset = bo_offset;
- bo->flags = pool->bo_flags;
- bo->map = map;
- pool->size = size;
-
- return VK_SUCCESS;
-}
+ void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, pool->fd,
+ BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
+ if (map == MAP_FAILED)
+ return vk_errorf(pool->device, pool->device,
+ VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
-static struct anv_bo *
-anv_block_pool_get_bo(struct anv_block_pool *pool, int32_t *offset)
-{
- struct anv_bo *bo, *bo_found = NULL;
- int32_t cur_offset = 0;
+ struct anv_bo *new_bo;
+ VkResult result = anv_device_import_bo_from_host_ptr(pool->device,
+ map, size,
+ bo_alloc_flags,
+ 0 /* client_address */,
+ &new_bo);
+ if (result != VK_SUCCESS) {
+ munmap(map, size);
+ return result;
+ }
- assert(offset);
+ struct anv_mmap_cleanup *cleanup = u_vector_add(&pool->mmap_cleanups);
+ if (!cleanup) {
+ munmap(map, size);
+ anv_device_release_bo(pool->device, new_bo);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ cleanup->map = map;
+ cleanup->size = size;
- if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
- return pool->bo;
+ /* Now that we mapped the new memory, we can write the new
+ * center_bo_offset back into pool and update pool->map. */
+ pool->center_bo_offset = center_bo_offset;
+ pool->map = map + center_bo_offset;
- anv_block_pool_foreach_bo(bo, pool) {
- if (*offset < cur_offset + bo->size) {
- bo_found = bo;
- break;
- }
- cur_offset += bo->size;
+ pool->bos[pool->nbos++] = new_bo;
+ pool->wrapper_bo.map = new_bo;
}
- assert(bo_found != NULL);
- *offset -= cur_offset;
+ assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS);
+ pool->size = size;
- return bo_found;
+ return VK_SUCCESS;
}
/** Returns current memory map of the block pool.
* rather than the start of the block pool BO map.
*/
void*
-anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
+anv_block_pool_map(struct anv_block_pool *pool, int32_t offset, uint32_t size)
{
- if (pool->bo_flags & EXEC_OBJECT_PINNED) {
- struct anv_bo *bo = anv_block_pool_get_bo(pool, &offset);
- return bo->map + offset;
+ if (pool->use_softpin) {
+ struct anv_bo *bo = NULL;
+ int32_t bo_offset = 0;
+ anv_block_pool_foreach_bo(iter_bo, pool) {
+ if (offset < bo_offset + iter_bo->size) {
+ bo = iter_bo;
+ break;
+ }
+ bo_offset += iter_bo->size;
+ }
+ assert(bo != NULL);
+ assert(offset >= bo_offset);
+ assert((offset - bo_offset) + size <= bo->size);
+
+ return bo->map + (offset - bo_offset);
} else {
return pool->map + offset;
}
* the pool and a 4K CPU page.
*/
static uint32_t
-anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state,
+ uint32_t contiguous_size)
{
VkResult result = VK_SUCCESS;
*/
assert(old_size > 0);
+ const uint32_t old_back = pool->center_bo_offset;
+ const uint32_t old_front = old_size - pool->center_bo_offset;
+
/* The back_used and front_used may actually be smaller than the actual
* requirement because they are based on the next pointers which are
* updated prior to calling this function.
*/
- uint32_t back_required = MAX2(back_used, pool->center_bo_offset);
- uint32_t front_required = MAX2(front_used, old_size - pool->center_bo_offset);
+ uint32_t back_required = MAX2(back_used, old_back);
+ uint32_t front_required = MAX2(front_used, old_front);
+
+ if (pool->use_softpin) {
+ /* With softpin, the pool is made up of a bunch of buffers with separate
+ * maps. Make sure we have enough contiguous space that we can get a
+ * properly contiguous map for the next chunk.
+ */
+ assert(old_back == 0);
+ front_required = MAX2(front_required, old_front + contiguous_size);
+ }
if (back_used * 2 <= back_required && front_used * 2 <= front_required) {
/* If we're in this case then this isn't the first allocation and we
result = anv_block_pool_expand_range(pool, center_bo_offset, size);
- pool->bo->flags = pool->bo_flags;
-
done:
pthread_mutex_unlock(&pool->device->mutex);
if (state.next + block_size <= state.end) {
return state.next;
} else if (state.next <= state.end) {
- if (pool->bo_flags & EXEC_OBJECT_PINNED && state.next < state.end) {
+ if (pool->use_softpin && state.next < state.end) {
/* We need to grow the block pool, but still have some leftover
* space that can't be used by that particular allocation. So we
* add that as a "padding", and return it.
*/
new.next = state.next + block_size;
do {
- new.end = anv_block_pool_grow(pool, pool_state);
+ new.end = anv_block_pool_grow(pool, pool_state, block_size);
} while (new.end < new.next);
old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
VkResult
anv_state_pool_init(struct anv_state_pool *pool,
struct anv_device *device,
- uint64_t start_address,
- uint32_t block_size,
- uint64_t bo_flags)
+ uint64_t base_address,
+ int32_t start_offset,
+ uint32_t block_size)
{
+ /* We don't want to ever see signed overflow */
+ assert(start_offset < INT32_MAX - (int32_t)BLOCK_POOL_MEMFD_SIZE);
+
VkResult result = anv_block_pool_init(&pool->block_pool, device,
- start_address,
- block_size * 16,
- bo_flags);
+ base_address + start_offset,
+ block_size * 16);
if (result != VK_SUCCESS)
return result;
+ pool->start_offset = start_offset;
+
result = anv_state_table_init(&pool->table, device, 64);
if (result != VK_SUCCESS) {
anv_block_pool_finish(&pool->block_pool);
struct anv_state *state_i = anv_state_table_get(&pool->table,
st_idx + i);
state_i->alloc_size = block_size;
- state_i->offset = chunk_offset + block_size * i;
- state_i->map = anv_block_pool_map(&pool->block_pool, state_i->offset);
+ state_i->offset = pool->start_offset + chunk_offset + block_size * i;
+ state_i->map = anv_block_pool_map(&pool->block_pool,
+ state_i->offset,
+ state_i->alloc_size);
}
uint32_t block_bucket = anv_state_pool_get_bucket(block_size);
state = anv_free_list_pop(&pool->buckets[bucket].free_list,
&pool->table);
if (state) {
- assert(state->offset >= 0);
+ assert(state->offset >= pool->start_offset);
goto done;
}
assert(result == VK_SUCCESS);
state = anv_state_table_get(&pool->table, idx);
- state->offset = offset;
+ state->offset = pool->start_offset + offset;
state->alloc_size = alloc_size;
- state->map = anv_block_pool_map(&pool->block_pool, offset);
+ state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
if (padding > 0) {
uint32_t return_offset = offset - padding;
struct anv_state *state;
uint32_t alloc_size = pool->block_size;
+ /* This function is only used with pools where start_offset == 0 */
+ assert(pool->start_offset == 0);
+
state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
if (state) {
- assert(state->offset < 0);
+ assert(state->offset < pool->start_offset);
goto done;
}
assert(result == VK_SUCCESS);
state = anv_state_table_get(&pool->table, idx);
- state->offset = offset;
+ state->offset = pool->start_offset + offset;
state->alloc_size = alloc_size;
- state->map = anv_block_pool_map(&pool->block_pool, state->offset);
+ state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
done:
VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
assert(util_is_power_of_two_or_zero(state.alloc_size));
unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);
- if (state.offset < 0) {
+ if (state.offset < pool->start_offset) {
assert(state.alloc_size == pool->block_size);
anv_free_list_push(&pool->back_alloc_free_list,
&pool->table, state.idx, 1);
stream->block = ANV_STATE_NULL;
- stream->block_list = NULL;
-
/* Ensure that next + whatever > block_size. This way the first call to
* state_stream_alloc fetches a new block.
*/
stream->next = block_size;
+ util_dynarray_init(&stream->all_blocks, NULL);
+
VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
}
void
anv_state_stream_finish(struct anv_state_stream *stream)
{
- struct anv_state_stream_block *next = stream->block_list;
- while (next != NULL) {
- struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
- VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
- VG(VALGRIND_MAKE_MEM_UNDEFINED(next, stream->block_size));
- anv_state_pool_free_no_vg(stream->state_pool, sb.block);
- next = sb.next;
+ util_dynarray_foreach(&stream->all_blocks, struct anv_state, block) {
+ VG(VALGRIND_MEMPOOL_FREE(stream, block->map));
+ VG(VALGRIND_MAKE_MEM_NOACCESS(block->map, block->alloc_size));
+ anv_state_pool_free_no_vg(stream->state_pool, *block);
}
+ util_dynarray_fini(&stream->all_blocks);
VG(VALGRIND_DESTROY_MEMPOOL(stream));
}
stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
block_size, PAGE_SIZE);
+ util_dynarray_append(&stream->all_blocks,
+ struct anv_state, stream->block);
+ VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, block_size));
- struct anv_state_stream_block *sb = stream->block.map;
- VG_NOACCESS_WRITE(&sb->block, stream->block);
- VG_NOACCESS_WRITE(&sb->next, stream->block_list);
- stream->block_list = sb;
- VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, NULL));
-
- VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, stream->block_size));
-
- /* Reset back to the start plus space for the header */
- stream->next = sizeof(*sb);
-
- offset = align_u32(stream->next, alignment);
+ /* Reset back to the start */
+ stream->next = offset = 0;
assert(offset + size <= stream->block.alloc_size);
}
+ const bool new_block = stream->next == 0;
struct anv_state state = stream->block;
state.offset += offset;
stream->next = offset + size;
-#ifdef HAVE_VALGRIND
- struct anv_state_stream_block *sb = stream->block_list;
- void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
- if (vg_ptr == NULL) {
- vg_ptr = state.map;
- VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
- VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
+ if (new_block) {
+ assert(state.map == stream->block.map);
+ VG(VALGRIND_MEMPOOL_ALLOC(stream, state.map, size));
} else {
- void *state_end = state.map + state.alloc_size;
/* This only updates the mempool. The newly allocated chunk is still
* marked as NOACCESS. */
- VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
+ VG(VALGRIND_MEMPOOL_CHANGE(stream, stream->block.map, stream->block.map,
+ stream->next));
/* Mark the newly allocated chunk as undefined */
- VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
+ VG(VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size));
}
-#endif
return state;
}
-struct bo_pool_bo_link {
- struct bo_pool_bo_link *next;
- struct anv_bo bo;
-};
+void
+anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool,
+ struct anv_state_pool *parent,
+ uint32_t count, uint32_t size, uint32_t alignment)
+{
+ pool->pool = parent;
+ pool->reserved_blocks = ANV_FREE_LIST_EMPTY;
+ pool->count = count;
+
+ for (unsigned i = 0; i < count; i++) {
+ struct anv_state state = anv_state_pool_alloc(pool->pool, size, alignment);
+ anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
+ }
+}
+
+void
+anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool)
+{
+ struct anv_state *state;
+
+ while ((state = anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table))) {
+ anv_state_pool_free(pool->pool, *state);
+ pool->count--;
+ }
+ assert(pool->count == 0);
+}
+
+struct anv_state
+anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool)
+{
+ return *anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table);
+}
void
-anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
- uint64_t bo_flags)
+anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool,
+ struct anv_state state)
+{
+ anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
+}
+
+void
+anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device)
{
pool->device = device;
- pool->bo_flags = bo_flags;
- memset(pool->free_list, 0, sizeof(pool->free_list));
+ for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
+ util_sparse_array_free_list_init(&pool->free_list[i],
+ &device->bo_cache.bo_map, 0,
+ offsetof(struct anv_bo, free_index));
+ }
VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
}
anv_bo_pool_finish(struct anv_bo_pool *pool)
{
for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
- struct bo_pool_bo_link *link = PFL_PTR(pool->free_list[i]);
- while (link != NULL) {
- struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);
-
- anv_gem_munmap(link_copy.bo.map, link_copy.bo.size);
- anv_vma_free(pool->device, &link_copy.bo);
- anv_gem_close(pool->device, link_copy.bo.gem_handle);
- link = link_copy.next;
+ while (1) {
+ struct anv_bo *bo =
+ util_sparse_array_free_list_pop_elem(&pool->free_list[i]);
+ if (bo == NULL)
+ break;
+
+ /* anv_device_release_bo is going to "free" it */
+ VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1));
+ anv_device_release_bo(pool->device, bo);
}
}
}
VkResult
-anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo, uint32_t size)
+anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size,
+ struct anv_bo **bo_out)
{
- VkResult result;
-
const unsigned size_log2 = size < 4096 ? 12 : ilog2_round_up(size);
const unsigned pow2_size = 1 << size_log2;
const unsigned bucket = size_log2 - 12;
assert(bucket < ARRAY_SIZE(pool->free_list));
- void *next_free_void;
- if (anv_ptr_free_list_pop(&pool->free_list[bucket], &next_free_void)) {
- struct bo_pool_bo_link *next_free = next_free_void;
- *bo = VG_NOACCESS_READ(&next_free->bo);
- assert(bo->gem_handle);
- assert(bo->map == next_free);
- assert(size <= bo->size);
-
+ struct anv_bo *bo =
+ util_sparse_array_free_list_pop_elem(&pool->free_list[bucket]);
+ if (bo != NULL) {
VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
-
+ *bo_out = bo;
return VK_SUCCESS;
}
- struct anv_bo new_bo;
-
- result = anv_bo_init_new(&new_bo, pool->device, pow2_size);
+ VkResult result = anv_device_alloc_bo(pool->device,
+ pow2_size,
+ ANV_BO_ALLOC_MAPPED |
+ ANV_BO_ALLOC_SNOOPED |
+ ANV_BO_ALLOC_CAPTURE,
+ 0 /* explicit_address */,
+ &bo);
if (result != VK_SUCCESS)
return result;
- new_bo.flags = pool->bo_flags;
-
- if (!anv_vma_alloc(pool->device, &new_bo))
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
-
- assert(new_bo.size == pow2_size);
-
- new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0);
- if (new_bo.map == MAP_FAILED) {
- anv_gem_close(pool->device, new_bo.gem_handle);
- anv_vma_free(pool->device, &new_bo);
- return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
- }
-
- /* We are removing the state flushes, so lets make sure that these buffers
- * are cached/snooped.
- */
- if (!pool->device->info.has_llc) {
- anv_gem_set_caching(pool->device, new_bo.gem_handle,
- I915_CACHING_CACHED);
- }
-
- *bo = new_bo;
-
+ /* We want it to look like it came from this pool */
+ VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));
VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
+ *bo_out = bo;
+
return VK_SUCCESS;
}
void
-anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo_in)
+anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo)
{
- /* Make a copy in case the anv_bo happens to be storred in the BO */
- struct anv_bo bo = *bo_in;
+ VG(VALGRIND_MEMPOOL_FREE(pool, bo->map));
- VG(VALGRIND_MEMPOOL_FREE(pool, bo.map));
-
- struct bo_pool_bo_link *link = bo.map;
- VG_NOACCESS_WRITE(&link->bo, bo);
-
- assert(util_is_power_of_two_or_zero(bo.size));
- const unsigned size_log2 = ilog2_round_up(bo.size);
+ assert(util_is_power_of_two_or_zero(bo->size));
+ const unsigned size_log2 = ilog2_round_up(bo->size);
const unsigned bucket = size_log2 - 12;
assert(bucket < ARRAY_SIZE(pool->free_list));
- anv_ptr_free_list_push(&pool->free_list[bucket], link);
+ assert(util_sparse_array_get(&pool->device->bo_cache.bo_map,
+ bo->gem_handle) == bo);
+ util_sparse_array_free_list_push(&pool->free_list[bucket],
+ &bo->gem_handle, 1);
}
// Scratch pool
{
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
for (unsigned i = 0; i < 16; i++) {
- struct anv_scratch_bo *bo = &pool->bos[i][s];
- if (bo->exists > 0) {
- anv_vma_free(device, &bo->bo);
- anv_gem_close(device, bo->bo.gem_handle);
- }
+ if (pool->bos[i][s] != NULL)
+ anv_device_release_bo(device, pool->bos[i][s]);
}
}
}
unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
assert(scratch_size_log2 < 16);
- struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage];
+ struct anv_bo *bo = p_atomic_read(&pool->bos[scratch_size_log2][stage]);
- /* We can use "exists" to shortcut and ignore the critical section */
- if (bo->exists)
- return &bo->bo;
+ if (bo != NULL)
+ return bo;
- pthread_mutex_lock(&device->mutex);
-
- __sync_synchronize();
- if (bo->exists) {
- pthread_mutex_unlock(&device->mutex);
- return &bo->bo;
- }
+ const struct gen_device_info *devinfo = &device->info;
- const struct anv_physical_device *physical_device =
- &device->instance->physicalDevice;
- const struct gen_device_info *devinfo = &physical_device->info;
+ unsigned subslices = MAX2(device->physical->subslice_total, 1);
- const unsigned subslices = MAX2(physical_device->subslice_total, 1);
+ /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
+ *
+ * "Scratch Space per slice is computed based on 4 sub-slices. SW
+ * must allocate scratch space enough so that each slice has 4
+ * slices allowed."
+ *
+ * According to the other driver team, this applies to compute shaders
+ * as well. This is not currently documented at all.
+ *
+ * This hack is no longer necessary on Gen11+.
+ *
+ * For Gen11+, scratch space allocation is based on the number of threads
+ * in the base configuration.
+ */
+ if (devinfo->gen >= 12)
+ subslices = devinfo->num_subslices[0];
+ else if (devinfo->gen == 11)
+ subslices = 8;
+ else if (devinfo->gen >= 9)
+ subslices = 4 * devinfo->num_slices;
unsigned scratch_ids_per_subslice;
- if (devinfo->is_haswell) {
+ if (devinfo->gen >= 12) {
+ /* Same as ICL below, but with 16 EUs. */
+ scratch_ids_per_subslice = 16 * 8;
+ } else if (devinfo->gen == 11) {
+ /* The MEDIA_VFE_STATE docs say:
+ *
+ * "Starting with this configuration, the Maximum Number of
+ * Threads must be set to (#EU * 8) for GPGPU dispatches.
+ *
+ * Although there are only 7 threads per EU in the configuration,
+ * the FFTID is calculated as if there are 8 threads per EU,
+ * which in turn requires a larger amount of Scratch Space to be
+ * allocated by the driver."
+ */
+ scratch_ids_per_subslice = 8 * 8;
+ } else if (devinfo->is_haswell) {
/* WaCSScratchSize:hsw
*
* Haswell's scratch space address calculation appears to be sparse
uint32_t size = per_thread_scratch * max_threads[stage];
- anv_bo_init_new(&bo->bo, device, size);
-
/* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
* are still relative to the general state base address. When we emit
* STATE_BASE_ADDRESS, we set general state base address to 0 and the size
*
* so nothing will ever touch the top page.
*/
- assert(!(bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS));
-
- if (device->instance->physicalDevice.has_exec_async)
- bo->bo.flags |= EXEC_OBJECT_ASYNC;
-
- if (device->instance->physicalDevice.use_softpin)
- bo->bo.flags |= EXEC_OBJECT_PINNED;
-
- anv_vma_alloc(device, &bo->bo);
-
- /* Set the exists last because it may be read by other threads */
- __sync_synchronize();
- bo->exists = true;
-
- pthread_mutex_unlock(&device->mutex);
+ VkResult result = anv_device_alloc_bo(device, size,
+ ANV_BO_ALLOC_32BIT_ADDRESS,
+ 0 /* explicit_address */,
+ &bo);
+ if (result != VK_SUCCESS)
+ return NULL; /* TODO */
- return &bo->bo;
+ struct anv_bo *current_bo =
+ p_atomic_cmpxchg(&pool->bos[scratch_size_log2][stage], NULL, bo);
+ if (current_bo) {
+ anv_device_release_bo(device, bo);
+ return current_bo;
+ } else {
+ return bo;
+ }
}
-struct anv_cached_bo {
- struct anv_bo bo;
-
- uint32_t refcount;
-};
-
VkResult
anv_bo_cache_init(struct anv_bo_cache *cache)
{
- cache->bo_map = _mesa_pointer_hash_table_create(NULL);
- if (!cache->bo_map)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ util_sparse_array_init(&cache->bo_map, sizeof(struct anv_bo), 1024);
if (pthread_mutex_init(&cache->mutex, NULL)) {
- _mesa_hash_table_destroy(cache->bo_map, NULL);
+ util_sparse_array_finish(&cache->bo_map);
return vk_errorf(NULL, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
"pthread_mutex_init failed: %m");
}
void
anv_bo_cache_finish(struct anv_bo_cache *cache)
{
- _mesa_hash_table_destroy(cache->bo_map, NULL);
+ util_sparse_array_finish(&cache->bo_map);
pthread_mutex_destroy(&cache->mutex);
}
-static struct anv_cached_bo *
-anv_bo_cache_lookup_locked(struct anv_bo_cache *cache, uint32_t gem_handle)
+#define ANV_BO_CACHE_SUPPORTED_FLAGS \
+ (EXEC_OBJECT_WRITE | \
+ EXEC_OBJECT_ASYNC | \
+ EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
+ EXEC_OBJECT_PINNED | \
+ EXEC_OBJECT_CAPTURE)
+
+static uint32_t
+anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
+ enum anv_bo_alloc_flags alloc_flags)
{
- struct hash_entry *entry =
- _mesa_hash_table_search(cache->bo_map,
- (const void *)(uintptr_t)gem_handle);
- if (!entry)
- return NULL;
+ struct anv_physical_device *pdevice = device->physical;
- struct anv_cached_bo *bo = (struct anv_cached_bo *)entry->data;
- assert(bo->bo.gem_handle == gem_handle);
+ uint64_t bo_flags = 0;
+ if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS) &&
+ pdevice->supports_48bit_addresses)
+ bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
- return bo;
-}
+ if ((alloc_flags & ANV_BO_ALLOC_CAPTURE) && pdevice->has_exec_capture)
+ bo_flags |= EXEC_OBJECT_CAPTURE;
-UNUSED static struct anv_bo *
-anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
-{
- pthread_mutex_lock(&cache->mutex);
+ if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) {
+ assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC);
+ bo_flags |= EXEC_OBJECT_WRITE;
+ }
- struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+ if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
+ bo_flags |= EXEC_OBJECT_ASYNC;
- pthread_mutex_unlock(&cache->mutex);
+ if (pdevice->use_softpin)
+ bo_flags |= EXEC_OBJECT_PINNED;
- return bo ? &bo->bo : NULL;
+ return bo_flags;
}
-#define ANV_BO_CACHE_SUPPORTED_FLAGS \
- (EXEC_OBJECT_WRITE | \
- EXEC_OBJECT_ASYNC | \
- EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
- EXEC_OBJECT_PINNED | \
- ANV_BO_EXTERNAL)
+static uint32_t
+anv_device_get_bo_align(struct anv_device *device,
+ enum anv_bo_alloc_flags alloc_flags)
+{
+ /* Gen12 CCS surface addresses need to be 64K aligned. */
+ if (device->info.gen >= 12 && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS))
+ return 64 * 1024;
+
+ return 4096;
+}
VkResult
-anv_bo_cache_alloc(struct anv_device *device,
- struct anv_bo_cache *cache,
- uint64_t size, uint64_t bo_flags,
- struct anv_bo **bo_out)
+anv_device_alloc_bo(struct anv_device *device,
+ uint64_t size,
+ enum anv_bo_alloc_flags alloc_flags,
+ uint64_t explicit_address,
+ struct anv_bo **bo_out)
{
- assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
-
- struct anv_cached_bo *bo =
- vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (!bo)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ if (!device->physical->has_implicit_ccs)
+ assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
- bo->refcount = 1;
+ const uint32_t bo_flags =
+ anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
+ assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
/* The kernel is going to give us whole pages anyway */
size = align_u64(size, 4096);
- VkResult result = anv_bo_init_new(&bo->bo, device, size);
- if (result != VK_SUCCESS) {
- vk_free(&device->alloc, bo);
- return result;
+ const uint32_t align = anv_device_get_bo_align(device, alloc_flags);
+
+ uint64_t ccs_size = 0;
+ if (device->info.has_aux_map && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS)) {
+ /* Align the size up to the next multiple of 64K so we don't have any
+ * AUX-TT entries pointing from a 64K page to itself.
+ */
+ size = align_u64(size, 64 * 1024);
+
+ /* See anv_bo::_ccs_size */
+ ccs_size = align_u64(DIV_ROUND_UP(size, GEN_AUX_MAP_GEN12_CCS_SCALE), 4096);
}
- bo->bo.flags = bo_flags;
+ uint32_t gem_handle = anv_gem_create(device, size + ccs_size);
+ if (gem_handle == 0)
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
+ struct anv_bo new_bo = {
+ .gem_handle = gem_handle,
+ .refcount = 1,
+ .offset = -1,
+ .size = size,
+ ._ccs_size = ccs_size,
+ .flags = bo_flags,
+ .is_external = (alloc_flags & ANV_BO_ALLOC_EXTERNAL),
+ .has_client_visible_address =
+ (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
+ .has_implicit_ccs = ccs_size > 0,
+ };
+
+ if (alloc_flags & ANV_BO_ALLOC_MAPPED) {
+ new_bo.map = anv_gem_mmap(device, new_bo.gem_handle, 0, size, 0);
+ if (new_bo.map == MAP_FAILED) {
+ anv_gem_close(device, new_bo.gem_handle);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ }
- if (!anv_vma_alloc(device, &bo->bo)) {
- anv_gem_close(device, bo->bo.gem_handle);
- vk_free(&device->alloc, bo);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "failed to allocate virtual address for BO");
+ if (alloc_flags & ANV_BO_ALLOC_SNOOPED) {
+ assert(alloc_flags & ANV_BO_ALLOC_MAPPED);
+ /* We don't want to change these defaults if it's going to be shared
+ * with another process.
+ */
+ assert(!(alloc_flags & ANV_BO_ALLOC_EXTERNAL));
+
+ /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
+ * I915_CACHING_NONE on non-LLC platforms. For many internal state
+ * objects, we'd rather take the snooping overhead than risk forgetting
+ * a CLFLUSH somewhere. Userptr objects are always created as
+ * I915_CACHING_CACHED, which on non-LLC means snooped so there's no
+ * need to do this there.
+ */
+ if (!device->info.has_llc) {
+ anv_gem_set_caching(device, new_bo.gem_handle,
+ I915_CACHING_CACHED);
+ }
}
- assert(bo->bo.gem_handle);
+ if (alloc_flags & ANV_BO_ALLOC_FIXED_ADDRESS) {
+ new_bo.has_fixed_address = true;
+ new_bo.offset = explicit_address;
+ } else if (new_bo.flags & EXEC_OBJECT_PINNED) {
+ new_bo.offset = anv_vma_alloc(device, new_bo.size + new_bo._ccs_size,
+ align, alloc_flags, explicit_address);
+ if (new_bo.offset == 0) {
+ if (new_bo.map)
+ anv_gem_munmap(device, new_bo.map, size);
+ anv_gem_close(device, new_bo.gem_handle);
+ return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+ } else {
+ assert(!new_bo.has_client_visible_address);
+ }
- pthread_mutex_lock(&cache->mutex);
+ if (new_bo._ccs_size > 0) {
+ assert(device->info.has_aux_map);
+ gen_aux_map_add_mapping(device->aux_map_ctx,
+ gen_canonical_address(new_bo.offset),
+ gen_canonical_address(new_bo.offset + new_bo.size),
+ new_bo.size, 0 /* format_bits */);
+ }
- _mesa_hash_table_insert(cache->bo_map,
- (void *)(uintptr_t)bo->bo.gem_handle, bo);
+ assert(new_bo.gem_handle);
- pthread_mutex_unlock(&cache->mutex);
+ /* If we just got this gem_handle from anv_gem_create then we know no one
+ * else is touching this BO at the moment so we don't need to lock here.
+ */
+ struct anv_bo *bo = anv_device_lookup_bo(device, new_bo.gem_handle);
+ *bo = new_bo;
- *bo_out = &bo->bo;
+ *bo_out = bo;
return VK_SUCCESS;
}
VkResult
-anv_bo_cache_import_host_ptr(struct anv_device *device,
- struct anv_bo_cache *cache,
- void *host_ptr, uint32_t size,
- uint64_t bo_flags, struct anv_bo **bo_out)
+anv_device_import_bo_from_host_ptr(struct anv_device *device,
+ void *host_ptr, uint32_t size,
+ enum anv_bo_alloc_flags alloc_flags,
+ uint64_t client_address,
+ struct anv_bo **bo_out)
{
+ assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
+ ANV_BO_ALLOC_SNOOPED |
+ ANV_BO_ALLOC_FIXED_ADDRESS)));
+
+ /* We can't do implicit CCS with an aux table on shared memory */
+ if (!device->physical->has_implicit_ccs || device->info.has_aux_map)
+ assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
+
+ struct anv_bo_cache *cache = &device->bo_cache;
+ const uint32_t bo_flags =
+ anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
- assert((bo_flags & ANV_BO_EXTERNAL) == 0);
uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size);
if (!gem_handle)
pthread_mutex_lock(&cache->mutex);
- struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
- if (bo) {
+ struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
+ if (bo->refcount > 0) {
/* VK_EXT_external_memory_host doesn't require handling importing the
* same pointer twice at the same time, but we don't get in the way. If
* the kernel gives us the same gem_handle, only succeed if the flags match.
*/
- if (bo_flags != bo->bo.flags) {
+ assert(bo->gem_handle == gem_handle);
+ if (bo_flags != bo->flags) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"same host pointer imported two different ways");
}
- __sync_fetch_and_add(&bo->refcount, 1);
- } else {
- bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (!bo) {
- anv_gem_close(device, gem_handle);
+
+ if (bo->has_client_visible_address !=
+ ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported with and without buffer "
+ "device address");
}
- bo->refcount = 1;
-
- anv_bo_init(&bo->bo, gem_handle, size);
- bo->bo.flags = bo_flags;
-
- if (!anv_vma_alloc(device, &bo->bo)) {
- anv_gem_close(device, bo->bo.gem_handle);
+ if (client_address && client_address != gen_48b_address(bo->offset)) {
pthread_mutex_unlock(&cache->mutex);
- vk_free(&device->alloc, bo);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "failed to allocate virtual address for BO");
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported at two different "
+ "addresses");
+ }
+
+ __sync_fetch_and_add(&bo->refcount, 1);
+ } else {
+ struct anv_bo new_bo = {
+ .gem_handle = gem_handle,
+ .refcount = 1,
+ .offset = -1,
+ .size = size,
+ .map = host_ptr,
+ .flags = bo_flags,
+ .is_external = true,
+ .from_host_ptr = true,
+ .has_client_visible_address =
+ (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
+ };
+
+ assert(client_address == gen_48b_address(client_address));
+ if (new_bo.flags & EXEC_OBJECT_PINNED) {
+ assert(new_bo._ccs_size == 0);
+ new_bo.offset = anv_vma_alloc(device, new_bo.size,
+ anv_device_get_bo_align(device,
+ alloc_flags),
+ alloc_flags, client_address);
+ if (new_bo.offset == 0) {
+ anv_gem_close(device, new_bo.gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+ } else {
+ assert(!new_bo.has_client_visible_address);
}
- _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+ *bo = new_bo;
}
pthread_mutex_unlock(&cache->mutex);
- *bo_out = &bo->bo;
+ *bo_out = bo;
return VK_SUCCESS;
}
VkResult
-anv_bo_cache_import(struct anv_device *device,
- struct anv_bo_cache *cache,
- int fd, uint64_t bo_flags,
- struct anv_bo **bo_out)
+anv_device_import_bo(struct anv_device *device,
+ int fd,
+ enum anv_bo_alloc_flags alloc_flags,
+ uint64_t client_address,
+ struct anv_bo **bo_out)
{
+ assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
+ ANV_BO_ALLOC_SNOOPED |
+ ANV_BO_ALLOC_FIXED_ADDRESS)));
+
+ /* We can't do implicit CCS with an aux table on shared memory */
+ if (!device->physical->has_implicit_ccs || device->info.has_aux_map)
+ assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
+
+ struct anv_bo_cache *cache = &device->bo_cache;
+ const uint32_t bo_flags =
+ anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
- assert(bo_flags & ANV_BO_EXTERNAL);
pthread_mutex_lock(&cache->mutex);
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
- struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
- if (bo) {
+ struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
+ if (bo->refcount > 0) {
/* We have to be careful how we combine flags so that it makes sense.
* Really, though, if we get to this case and it actually matters, the
* client has imported a BO twice in different ways and they get what
* they have coming.
*/
- uint64_t new_flags = ANV_BO_EXTERNAL;
- new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_WRITE;
- new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_ASYNC;
- new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
- new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_PINNED;
+ uint64_t new_flags = 0;
+ new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_WRITE;
+ new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_ASYNC;
+ new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_PINNED;
+ new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_CAPTURE;
/* It's theoretically possible for a BO to get imported such that it's
* both pinned and not pinned. The only way this can happen is if it
* gets imported as both a semaphore and a memory object and that would
* be an application error. Just fail out in that case.
*/
- if ((bo->bo.flags & EXEC_OBJECT_PINNED) !=
+ if ((bo->flags & EXEC_OBJECT_PINNED) !=
(bo_flags & EXEC_OBJECT_PINNED)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported two different ways");
}
* app is actually that stupid.
*/
if ((new_flags & EXEC_OBJECT_PINNED) &&
- (bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
+ (bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
(bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported on two different heaps");
}
- bo->bo.flags = new_flags;
+ if (bo->has_client_visible_address !=
+ ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported with and without buffer "
+ "device address");
+ }
+
+ if (client_address && client_address != gen_48b_address(bo->offset)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported at two different "
+ "addresses");
+ }
+
+ bo->flags = new_flags;
__sync_fetch_and_add(&bo->refcount, 1);
} else {
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
- bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (!bo) {
- anv_gem_close(device, gem_handle);
- pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- bo->refcount = 1;
-
- anv_bo_init(&bo->bo, gem_handle, size);
- bo->bo.flags = bo_flags;
-
- if (!anv_vma_alloc(device, &bo->bo)) {
- anv_gem_close(device, bo->bo.gem_handle);
- pthread_mutex_unlock(&cache->mutex);
- vk_free(&device->alloc, bo);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "failed to allocate virtual address for BO");
+ struct anv_bo new_bo = {
+ .gem_handle = gem_handle,
+ .refcount = 1,
+ .offset = -1,
+ .size = size,
+ .flags = bo_flags,
+ .is_external = true,
+ .has_client_visible_address =
+ (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
+ };
+
+ assert(client_address == gen_48b_address(client_address));
+ if (new_bo.flags & EXEC_OBJECT_PINNED) {
+ assert(new_bo._ccs_size == 0);
+ new_bo.offset = anv_vma_alloc(device, new_bo.size,
+ anv_device_get_bo_align(device,
+ alloc_flags),
+ alloc_flags, client_address);
+ if (new_bo.offset == 0) {
+ anv_gem_close(device, new_bo.gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+ } else {
+ assert(!new_bo.has_client_visible_address);
}
- _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+ *bo = new_bo;
}
pthread_mutex_unlock(&cache->mutex);
- *bo_out = &bo->bo;
+ *bo_out = bo;
return VK_SUCCESS;
}
VkResult
-anv_bo_cache_export(struct anv_device *device,
- struct anv_bo_cache *cache,
- struct anv_bo *bo_in, int *fd_out)
+anv_device_export_bo(struct anv_device *device,
+ struct anv_bo *bo, int *fd_out)
{
- assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
- struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+ assert(anv_device_lookup_bo(device, bo->gem_handle) == bo);
/* This BO must have been flagged external in order for us to be able
* to export it. This is done based on external options passed into
* anv_AllocateMemory.
*/
- assert(bo->bo.flags & ANV_BO_EXTERNAL);
+ assert(bo->is_external);
- int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
+ int fd = anv_gem_handle_to_fd(device, bo->gem_handle);
if (fd < 0)
return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
}
void
-anv_bo_cache_release(struct anv_device *device,
- struct anv_bo_cache *cache,
- struct anv_bo *bo_in)
+anv_device_release_bo(struct anv_device *device,
+ struct anv_bo *bo)
{
- assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
- struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+ struct anv_bo_cache *cache = &device->bo_cache;
+ assert(anv_device_lookup_bo(device, bo->gem_handle) == bo);
/* Try to decrement the counter but don't go below one. If this succeeds
* then the refcount has been decremented and we are not the last
pthread_mutex_unlock(&cache->mutex);
return;
}
+ assert(bo->refcount == 0);
+
+ if (bo->map && !bo->from_host_ptr)
+ anv_gem_munmap(device, bo->map, bo->size);
+
+ if (bo->_ccs_size > 0) {
+ assert(device->physical->has_implicit_ccs);
+ assert(device->info.has_aux_map);
+ assert(bo->has_implicit_ccs);
+ gen_aux_map_unmap_range(device->aux_map_ctx,
+ gen_canonical_address(bo->offset),
+ bo->size);
+ }
- struct hash_entry *entry =
- _mesa_hash_table_search(cache->bo_map,
- (const void *)(uintptr_t)bo->bo.gem_handle);
- assert(entry);
- _mesa_hash_table_remove(cache->bo_map, entry);
+ if ((bo->flags & EXEC_OBJECT_PINNED) && !bo->has_fixed_address)
+ anv_vma_free(device, bo->offset, bo->size + bo->_ccs_size);
- if (bo->bo.map)
- anv_gem_munmap(bo->bo.map, bo->bo.size);
+ uint32_t gem_handle = bo->gem_handle;
- anv_vma_free(device, &bo->bo);
+ /* Memset the BO just in case. The refcount being zero should be enough to
+ * prevent someone from assuming the data is valid but it's safer to just
+ * stomp to zero just in case. We explicitly do this *before* we close the
+ * GEM handle to ensure that if anyone allocates something and gets the
+ * same GEM handle, the memset has already happened and won't stomp all over
+ * any data they may write in this BO.
+ */
+ memset(bo, 0, sizeof(*bo));
- anv_gem_close(device, bo->bo.gem_handle);
+ anv_gem_close(device, gem_handle);
/* Don't unlock until we've actually closed the BO. The whole point of
* the BO cache is to ensure that we correctly handle races with creating
* again between mutex unlock and closing the GEM handle.
*/
pthread_mutex_unlock(&cache->mutex);
-
- vk_free(&device->alloc, bo);
}