* IN THE SOFTWARE.
*/
-#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
-#include <values.h>
+#include <limits.h>
#include <assert.h>
-#include <linux/futex.h>
#include <linux/memfd.h>
-#include <sys/time.h>
#include <sys/mman.h>
-#include <sys/syscall.h>
#include "anv_private.h"
+#include "util/hash_table.h"
+#include "util/simple_mtx.h"
+
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({ \
VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr))); \
#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})
-static inline long
-sys_futex(void *addr1, int op, int val1,
- struct timespec *timeout, void *addr2, int val3)
-{
- return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
-}
-
-static inline int
-futex_wake(uint32_t *addr, int count)
-{
- return sys_futex(addr, FUTEX_WAKE, count, NULL, NULL, 0);
-}
-
-static inline int
-futex_wait(uint32_t *addr, int32_t value)
-{
- return sys_futex(addr, FUTEX_WAIT, value, NULL, NULL, 0);
-}
-
+#ifndef HAVE_MEMFD_CREATE
static inline int
memfd_create(const char *name, unsigned int flags)
{
return syscall(SYS_memfd_create, name, flags);
}
+#endif
static inline uint32_t
ilog2_round_up(uint32_t value)
}
static void
-anv_free_list_push(union anv_free_list *list, void *map, int32_t offset)
+anv_free_list_push(union anv_free_list *list, void *map, int32_t offset,
+ uint32_t size, uint32_t count)
{
union anv_free_list current, old, new;
int32_t *next_ptr = map + offset;
+ /* If we're returning more than one chunk, we need to build a chain to add
+ * to the list. Fortunately, we can do this without any atomics since we
+ * own everything in the chain right now. `offset` is left pointing to the
+ * head of our chain list while `next_ptr` points to the tail.
+ */
+ for (uint32_t i = 1; i < count; i++) {
+ VG_NOACCESS_WRITE(next_ptr, offset + i * size);
+ next_ptr = map + offset + i * size;
+ }
+
old = *list;
do {
current = old;
} while (old.u64 != current.u64);
}
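+
+/* Illustrative usage sketch (not part of this change): returning a run of
+ * chunks through the new size/count parameters. The list, map, offset and
+ * sizes below are placeholders; the chain through the first 4 bytes of each
+ * chunk is built without atomics and then published with one push.
+ */
+#if 0
+ /* Give back four contiguous 64-byte chunks starting at offset 256. */
+ anv_free_list_push(list, map, 256, 64, 4);
+#endif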
-static uint32_t
-anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state);
+static VkResult
+anv_block_pool_expand_range(struct anv_block_pool *pool,
+ uint32_t center_bo_offset, uint32_t size);
-void
+VkResult
anv_block_pool_init(struct anv_block_pool *pool,
- struct anv_device *device, uint32_t block_size)
+ struct anv_device *device,
+ uint64_t start_address,
+ uint32_t initial_size,
+ uint64_t bo_flags)
{
- assert(util_is_power_of_two(block_size));
+ VkResult result;
pool->device = device;
- pool->bo.gem_handle = 0;
- pool->bo.offset = 0;
- pool->bo.size = 0;
- pool->bo.is_winsys_bo = false;
- pool->block_size = block_size;
- pool->free_list = ANV_FREE_LIST_EMPTY;
- pool->back_free_list = ANV_FREE_LIST_EMPTY;
+ pool->bo_flags = bo_flags;
+ pool->start_address = gen_canonical_address(start_address);
+
+ anv_bo_init(&pool->bo, 0, 0);
pool->fd = memfd_create("block pool", MFD_CLOEXEC);
if (pool->fd == -1)
- return;
+ return vk_error(VK_ERROR_INITIALIZATION_FAILED);
/* Just make it 2GB up-front. The Linux kernel won't actually back it
* with pages until we either map and fault on one of them or we use
* userptr and send a chunk of it off to the GPU.
*/
- if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1)
- return;
+ if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_fd;
+ }
- anv_vector_init(&pool->mmap_cleanups,
- round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128);
+ if (!u_vector_init(&pool->mmap_cleanups,
+ round_to_power_of_two(sizeof(struct anv_mmap_cleanup)),
+ 128)) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_fd;
+ }
pool->state.next = 0;
pool->state.end = 0;
pool->back_state.next = 0;
pool->back_state.end = 0;
- /* Immediately grow the pool so we'll have a backing bo. */
- pool->state.end = anv_block_pool_grow(pool, &pool->state);
+ result = anv_block_pool_expand_range(pool, 0, initial_size);
+ if (result != VK_SUCCESS)
+ goto fail_mmap_cleanups;
+
+ return VK_SUCCESS;
+
+ fail_mmap_cleanups:
+ u_vector_finish(&pool->mmap_cleanups);
+ fail_fd:
+ close(pool->fd);
+
+ return result;
}
void
{
struct anv_mmap_cleanup *cleanup;
- anv_vector_foreach(cleanup, &pool->mmap_cleanups) {
+ u_vector_foreach(cleanup, &pool->mmap_cleanups) {
if (cleanup->map)
munmap(cleanup->map, cleanup->size);
if (cleanup->gem_handle)
anv_gem_close(pool->device, cleanup->gem_handle);
}
- anv_vector_finish(&pool->mmap_cleanups);
+ u_vector_finish(&pool->mmap_cleanups);
close(pool->fd);
}
#define PAGE_SIZE 4096
+static VkResult
+anv_block_pool_expand_range(struct anv_block_pool *pool,
+ uint32_t center_bo_offset, uint32_t size)
+{
+ void *map;
+ uint32_t gem_handle;
+ struct anv_mmap_cleanup *cleanup;
+
+ /* Assert that we only ever grow the pool */
+ assert(center_bo_offset >= pool->back_state.end);
+ assert(size - center_bo_offset >= pool->state.end);
+
+ /* Assert that we don't go outside the bounds of the memfd */
+ assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
+ assert(size - center_bo_offset <=
+ BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);
+
+ cleanup = u_vector_add(&pool->mmap_cleanups);
+ if (!cleanup)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ *cleanup = ANV_MMAP_CLEANUP_INIT;
+
+ /* Just leak the old map until we destroy the pool. We can't munmap it
+ * without races or imposing locking on the block allocate fast path. On
+ * the whole the leaked maps add up to less than the size of the
+ * current map. MAP_POPULATE seems like the right thing to do, but we
+ * should try to get some numbers.
+ */
+ map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, pool->fd,
+ BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
+ if (map == MAP_FAILED)
+ return vk_errorf(pool->device->instance, pool->device,
+ VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
+
+ gem_handle = anv_gem_userptr(pool->device, map, size);
+ if (gem_handle == 0) {
+ munmap(map, size);
+ return vk_errorf(pool->device->instance, pool->device,
+ VK_ERROR_TOO_MANY_OBJECTS, "userptr failed: %m");
+ }
+
+ cleanup->map = map;
+ cleanup->size = size;
+ cleanup->gem_handle = gem_handle;
+
+#if 0
+ /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
+ * I915_CACHING_NONE on non-LLC platforms. However, userptr objects are
+ * always created as I915_CACHING_CACHED, which on non-LLC means
+ * snooped. That can be useful but comes with a bit of overhead. Since
+ * we're explicitly clflushing and don't want the overhead, we need to turn
+ * it off. */
+ if (!pool->device->info.has_llc) {
+ anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE);
+ anv_gem_set_domain(pool->device, gem_handle,
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ }
+#endif
+
+ /* Now that we successfully allocated everything, we can write the new
+ * values back into the pool. */
+ pool->map = map + center_bo_offset;
+ pool->center_bo_offset = center_bo_offset;
+
+ /* For block pool BOs we have to be a bit careful about where we place them
+ * in the GTT. There are two documented workarounds for state base address
+ * placement: Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
+ * which state that those two base addresses do not support 48-bit
+ * addresses and need to be placed in the bottom 32-bit range.
+ * Unfortunately, this is not quite accurate.
+ *
+ * The real problem is that we always set the size of our state pools in
+ * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
+ * likely significantly smaller. We do this because we do not know at the
+ * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
+ * the pool during command buffer building so we don't actually have a
+ * valid final size. If the address + size, as seen by STATE_BASE_ADDRESS
+ * overflows 48 bits, the GPU appears to treat all accesses to the buffer
+ * as being out of bounds and returns zero. For dynamic state, this
+ * usually just leads to rendering corruptions, but shaders that are all
+ * zero hang the GPU immediately.
+ *
+ * The easiest solution is to do exactly what the bogus workarounds say to
+ * do: restrict these buffers to 32-bit addresses. We could also pin the
+ * BO to some particular location of our choosing, but that's significantly
+ * more work than just not setting a flag. So, we explicitly DO NOT set
+ * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
+ * hard work for us.
+ */
+ anv_bo_init(&pool->bo, gem_handle, size);
+ if (pool->bo_flags & EXEC_OBJECT_PINNED) {
+ pool->bo.offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
+ center_bo_offset;
+ }
+ pool->bo.flags = pool->bo_flags;
+ pool->bo.map = map;
+
+ return VK_SUCCESS;
+}
+
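+/* Worked example of the mapping math above (illustrative numbers, assuming
+ * BLOCK_POOL_MEMFD_CENTER is the 1 GiB midpoint of the 2 GiB memfd):
+ * expanding to size = 64 KiB with center_bo_offset = 16 KiB mmaps the range
+ * [CENTER - 16 KiB, CENTER + 48 KiB) and sets pool->map = map + 16 KiB, so
+ * back allocations (negative offsets) and front allocations (positive
+ * offsets) both land inside the mapped window.
+ */
+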
/** Grows and re-centers the block pool.
*
* We grow the block pool in one or both directions in such a way that the
static uint32_t
anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
{
- size_t size;
- void *map;
- uint32_t gem_handle;
- struct anv_mmap_cleanup *cleanup;
+ VkResult result = VK_SUCCESS;
pthread_mutex_lock(&pool->device->mutex);
assert(state == &pool->state || back_used > 0);
- size_t old_size = pool->bo.size;
+ uint32_t old_size = pool->bo.size;
+
+ /* The block pool is always initialized to a nonzero size and this function
+ * is always called after initialization.
+ */
+ assert(old_size > 0);
+
+ /* back_used and front_used may be smaller than the actual requirement
+ * because they are based on the next pointers, which are updated prior
+ * to calling this function.
+ */
+ uint32_t back_required = MAX2(back_used, pool->center_bo_offset);
+ uint32_t front_required = MAX2(front_used, old_size - pool->center_bo_offset);
- if (old_size != 0 &&
- back_used * 2 <= pool->center_bo_offset &&
- front_used * 2 <= (old_size - pool->center_bo_offset)) {
+ if (back_used * 2 <= back_required && front_used * 2 <= front_required) {
/* If we're in this case then this isn't the first allocation and we
* already have enough space on both sides to hold double what we
* have allocated. There's nothing for us to do.
goto done;
}
- if (old_size == 0) {
- /* This is the first allocation */
- size = MAX2(32 * pool->block_size, PAGE_SIZE);
- } else {
- size = old_size * 2;
- }
+ uint32_t size = old_size * 2;
+ while (size < back_required + front_required)
+ size *= 2;
- /* We can't have a block pool bigger than 1GB because we use signed
- * 32-bit offsets in the free list and we don't want overflow. We
- * should never need a block pool bigger than 1GB anyway.
- */
- assert(size <= (1u << 31));
+ assert(size > pool->bo.size);
/* We compute a new center_bo_offset such that, when we double the size
* of the pool, we maintain the ratio of how much is used by each side.
*/
center_bo_offset = ((uint64_t)size * back_used) / total_used;
- /* Align down to a multiple of both the block size and page size */
- uint32_t granularity = MAX2(pool->block_size, PAGE_SIZE);
- assert(util_is_power_of_two(granularity));
- center_bo_offset &= ~(granularity - 1);
+ /* Align down to a multiple of the page size */
+ center_bo_offset &= ~(PAGE_SIZE - 1);
assert(center_bo_offset >= back_used);
/* Make sure we don't shrink the back end of the pool */
- if (center_bo_offset < pool->back_state.end)
- center_bo_offset = pool->back_state.end;
+ if (center_bo_offset < back_required)
+ center_bo_offset = back_required;
/* Make sure that we don't shrink the front end of the pool */
- if (size - center_bo_offset < pool->state.end)
- center_bo_offset = size - pool->state.end;
+ if (size - center_bo_offset < front_required)
+ center_bo_offset = size - front_required;
}
- assert(center_bo_offset % pool->block_size == 0);
assert(center_bo_offset % PAGE_SIZE == 0);
- /* Assert that we only ever grow the pool */
- assert(center_bo_offset >= pool->back_state.end);
- assert(size - center_bo_offset >= pool->state.end);
-
- cleanup = anv_vector_add(&pool->mmap_cleanups);
- if (!cleanup)
- goto fail;
- *cleanup = ANV_MMAP_CLEANUP_INIT;
-
- /* Just leak the old map until we destroy the pool. We can't munmap it
- * without races or imposing locking on the block allocate fast path. On
- * the whole the leaked maps adds up to less than the size of the
- * current map. MAP_POPULATE seems like the right thing to do, but we
- * should try to get some numbers.
- */
- map = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, pool->fd,
- BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
- cleanup->map = map;
- cleanup->size = size;
-
- if (map == MAP_FAILED)
- goto fail;
-
- gem_handle = anv_gem_userptr(pool->device, map, size);
- if (gem_handle == 0)
- goto fail;
- cleanup->gem_handle = gem_handle;
-
-#if 0
- /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
- * I915_CACHING_NONE on non-LLC platforms. However, userptr objects are
- * always created as I915_CACHING_CACHED, which on non-LLC means
- * snooped. That can be useful but comes with a bit of overheard. Since
- * we're eplicitly clflushing and don't want the overhead we need to turn
- * it off. */
- if (!pool->device->info.has_llc) {
- anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE);
- anv_gem_set_domain(pool->device, gem_handle,
- I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
- }
-#endif
+ result = anv_block_pool_expand_range(pool, center_bo_offset, size);
- /* Now that we successfull allocated everything, we can write the new
- * values back into pool. */
- pool->map = map + center_bo_offset;
- pool->center_bo_offset = center_bo_offset;
- pool->bo.gem_handle = gem_handle;
- pool->bo.size = size;
- pool->bo.map = map;
- pool->bo.index = 0;
+ pool->bo.flags = pool->bo_flags;
done:
pthread_mutex_unlock(&pool->device->mutex);
- /* Return the appropreate new size. This function never actually
- * updates state->next. Instead, we let the caller do that because it
- * needs to do so in order to maintain its concurrency model.
- */
- if (state == &pool->state) {
- return pool->bo.size - pool->center_bo_offset;
+ if (result == VK_SUCCESS) {
+ /* Return the appropriate new size. This function never actually
+ * updates state->next. Instead, we let the caller do that because it
+ * needs to do so in order to maintain its concurrency model.
+ */
+ if (state == &pool->state) {
+ return pool->bo.size - pool->center_bo_offset;
+ } else {
+ assert(pool->center_bo_offset > 0);
+ return pool->center_bo_offset;
+ }
} else {
- assert(pool->center_bo_offset > 0);
- return pool->center_bo_offset;
+ return 0;
}
-
-fail:
- pthread_mutex_unlock(&pool->device->mutex);
-
- return 0;
}
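+
+/* Worked example of the re-centering above (illustrative numbers): with
+ * old_size = 64 KiB, back_used = 8 KiB and front_used = 24 KiB, the pool
+ * doubles to size = 128 KiB and center_bo_offset = 128 KiB * 8 / 32 = 32 KiB
+ * (already page-aligned), preserving the 1:3 back-to-front usage ratio on
+ * both sides of the new center.
+ */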
static uint32_t
anv_block_pool_alloc_new(struct anv_block_pool *pool,
- struct anv_block_state *pool_state)
+ struct anv_block_state *pool_state,
+ uint32_t block_size)
{
struct anv_block_state state, old, new;
while (1) {
- state.u64 = __sync_fetch_and_add(&pool_state->u64, pool->block_size);
- if (state.next < state.end) {
+ state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
+ if (state.next + block_size <= state.end) {
assert(pool->map);
return state.next;
- } else if (state.next == state.end) {
- /* We allocated the first block outside the pool, we have to grow it.
- * pool_state->next acts a mutex: threads who try to allocate now will
- * get block indexes above the current limit and hit futex_wait
- * below. */
- new.next = state.next + pool->block_size;
- new.end = anv_block_pool_grow(pool, pool_state);
- assert(new.end >= new.next && new.end % pool->block_size == 0);
+ } else if (state.next <= state.end) {
+ /* We allocated the first block outside the pool so we have to grow
+ * the pool. pool_state->next acts as a mutex: threads that try to
+ * allocate now will get block indexes above the current limit and
+ * hit futex_wait below.
+ */
+ new.next = state.next + block_size;
+ do {
+ new.end = anv_block_pool_grow(pool, pool_state);
+ } while (new.end < new.next);
+
old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
if (old.next != state.next)
futex_wake(&pool_state->end, INT_MAX);
return state.next;
} else {
- futex_wait(&pool_state->end, state.end);
+ futex_wait(&pool_state->end, state.end, NULL);
continue;
}
}
}
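+
+/* Minimal sketch of the lock-free claim above, assuming (as the
+ * __sync_fetch_and_add on .u64 implies) that anv_block_state packs `next`
+ * and `end` into the two halves of one 64-bit word, so a single atomic add
+ * both claims [next, next + block_size) and observes the current `end`.
+ * Illustrative only.
+ */
+#if 0
+ struct anv_block_state claimed;
+ claimed.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
+ bool fits = claimed.next + block_size <= claimed.end; /* fast path? */
+#endif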
int32_t
-anv_block_pool_alloc(struct anv_block_pool *pool)
+anv_block_pool_alloc(struct anv_block_pool *pool,
+ uint32_t block_size)
{
- int32_t offset;
-
- /* Try free list first. */
- if (anv_free_list_pop(&pool->free_list, &pool->map, &offset)) {
- assert(offset >= 0);
- assert(pool->map);
- return offset;
- }
-
- return anv_block_pool_alloc_new(pool, &pool->state);
+ return anv_block_pool_alloc_new(pool, &pool->state, block_size);
}
/* Allocates a block out of the back of the block pool.
* gymnastics with the block pool's BO when doing relocations.
*/
int32_t
-anv_block_pool_alloc_back(struct anv_block_pool *pool)
+anv_block_pool_alloc_back(struct anv_block_pool *pool,
+ uint32_t block_size)
{
- int32_t offset;
-
- /* Try free list first. */
- if (anv_free_list_pop(&pool->back_free_list, &pool->map, &offset)) {
- assert(offset < 0);
- assert(pool->map);
- return offset;
- }
-
- offset = anv_block_pool_alloc_new(pool, &pool->back_state);
+ int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
+ block_size);
/* The offset we get out of anv_block_pool_alloc_new() is actually the
* number of bytes downwards from the middle to the end of the block.
* start of the block.
*/
assert(offset >= 0);
- return -(offset + pool->block_size);
+ return -(offset + block_size);
}
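+
+/* Illustrative arithmetic for the back allocation above: if
+ * anv_block_pool_alloc_new() returns offset 0 with block_size = 8192, the
+ * result is -(0 + 8192) = -8192, i.e. the block occupies [-8192, 0) relative
+ * to the pool's center and pool->map - 8192 points at its start.
+ */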
-void
-anv_block_pool_free(struct anv_block_pool *pool, int32_t offset)
+VkResult
+anv_state_pool_init(struct anv_state_pool *pool,
+ struct anv_device *device,
+ uint64_t start_address,
+ uint32_t block_size,
+ uint64_t bo_flags)
{
- if (offset < 0) {
- anv_free_list_push(&pool->back_free_list, pool->map, offset);
- } else {
- anv_free_list_push(&pool->free_list, pool->map, offset);
+ VkResult result = anv_block_pool_init(&pool->block_pool, device,
+ start_address,
+ block_size * 16,
+ bo_flags);
+ if (result != VK_SUCCESS)
+ return result;
+
+ assert(util_is_power_of_two_or_zero(block_size));
+ pool->block_size = block_size;
+ pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
+ for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
+ pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
+ pool->buckets[i].block.next = 0;
+ pool->buckets[i].block.end = 0;
}
+ VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
+
+ return VK_SUCCESS;
}
-static void
-anv_fixed_size_state_pool_init(struct anv_fixed_size_state_pool *pool,
- size_t state_size)
+void
+anv_state_pool_finish(struct anv_state_pool *pool)
{
- /* At least a cache line and must divide the block size. */
- assert(state_size >= 64 && util_is_power_of_two(state_size));
-
- pool->state_size = state_size;
- pool->free_list = ANV_FREE_LIST_EMPTY;
- pool->block.next = 0;
- pool->block.end = 0;
+ VG(VALGRIND_DESTROY_MEMPOOL(pool));
+ anv_block_pool_finish(&pool->block_pool);
}
static uint32_t
-anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool,
- struct anv_block_pool *block_pool)
+anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
+ struct anv_block_pool *block_pool,
+ uint32_t state_size,
+ uint32_t block_size)
{
- int32_t offset;
struct anv_block_state block, old, new;
+ uint32_t offset;
- /* Try free list first. */
- if (anv_free_list_pop(&pool->free_list, &block_pool->map, &offset)) {
- assert(offset >= 0);
- return offset;
- }
+ /* If our state is large, we don't need any sub-allocation from a block.
+ * Instead, we just grab whole (potentially large) blocks.
+ */
+ if (state_size >= block_size)
+ return anv_block_pool_alloc(block_pool, state_size);
- /* If free list was empty (or somebody raced us and took the items) we
- * allocate a new item from the end of the block */
restart:
- block.u64 = __sync_fetch_and_add(&pool->block.u64, pool->state_size);
+ block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
if (block.next < block.end) {
return block.next;
} else if (block.next == block.end) {
- offset = anv_block_pool_alloc(block_pool);
- new.next = offset + pool->state_size;
- new.end = offset + block_pool->block_size;
+ offset = anv_block_pool_alloc(block_pool, block_size);
+ new.next = offset + state_size;
+ new.end = offset + block_size;
old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
if (old.next != block.next)
futex_wake(&pool->block.end, INT_MAX);
return offset;
} else {
- futex_wait(&pool->block.end, block.end);
+ futex_wait(&pool->block.end, block.end, NULL);
goto restart;
}
}
-static void
-anv_fixed_size_state_pool_free(struct anv_fixed_size_state_pool *pool,
- struct anv_block_pool *block_pool,
- uint32_t offset)
+static uint32_t
+anv_state_pool_get_bucket(uint32_t size)
{
- anv_free_list_push(&pool->free_list, block_pool->map, offset);
+ unsigned size_log2 = ilog2_round_up(size);
+ assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
+ if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
+ size_log2 = ANV_MIN_STATE_SIZE_LOG2;
+ return size_log2 - ANV_MIN_STATE_SIZE_LOG2;
}
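+
+/* Worked example of the bucket math above, assuming ANV_MIN_STATE_SIZE_LOG2
+ * is 6 (a 64-byte minimum, matching the old state_size >= 64 assertion): a
+ * 200-byte request rounds up to size_log2 = 8 (256 bytes), so it lands in
+ * bucket 8 - 6 = 2 and anv_state_pool_get_bucket_size(2) returns 256.
+ */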
-void
-anv_state_pool_init(struct anv_state_pool *pool,
- struct anv_block_pool *block_pool)
+static uint32_t
+anv_state_pool_get_bucket_size(uint32_t bucket)
{
- pool->block_pool = block_pool;
- for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
- size_t size = 1 << (ANV_MIN_STATE_SIZE_LOG2 + i);
- anv_fixed_size_state_pool_init(&pool->buckets[i], size);
- }
- VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
+ uint32_t size_log2 = bucket + ANV_MIN_STATE_SIZE_LOG2;
+ return 1 << size_log2;
}
-void
-anv_state_pool_finish(struct anv_state_pool *pool)
+static struct anv_state
+anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
+ uint32_t size, uint32_t align)
{
- VG(VALGRIND_DESTROY_MEMPOOL(pool));
+ uint32_t bucket = anv_state_pool_get_bucket(MAX2(size, align));
+
+ struct anv_state state;
+ state.alloc_size = anv_state_pool_get_bucket_size(bucket);
+
+ /* Try free list first. */
+ if (anv_free_list_pop(&pool->buckets[bucket].free_list,
+ &pool->block_pool.map, &state.offset)) {
+ assert(state.offset >= 0);
+ goto done;
+ }
+
+ /* Try to grab a chunk from some larger bucket and split it up */
+ for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
+ int32_t chunk_offset;
+ if (anv_free_list_pop(&pool->buckets[b].free_list,
+ &pool->block_pool.map, &chunk_offset)) {
+ unsigned chunk_size = anv_state_pool_get_bucket_size(b);
+
+ /* We've found a chunk that's larger than the requested state size.
+ * There are a couple of options as to what we do with it:
+ *
+ * 1) We could fully split the chunk into state.alloc_size sized
+ * pieces. However, this would mean that allocating a 16B
+ * state could potentially split a 2MB chunk into 512K smaller
+ * chunks. This would lead to unnecessary fragmentation.
+ *
+ * 2) The classic "buddy allocator" method would have us split the
+ * chunk in half and return one half. Then we would split the
+ * remaining half in half and return one half, and repeat as
+ * needed until we get down to the size we want. However, if
+ * you are allocating a bunch of the same size state (which is
+ * the common case), this means that every other allocation has
+ * to go up a level and every fourth goes up two levels, etc.
+ * This is not nearly as efficient as it could be if we did a
+ * little more work up-front.
+ *
+ * 3) Split the difference between (1) and (2) by doing a
+ * two-level split. If it's bigger than some fixed block_size,
+ * we split it into block_size sized chunks and return all but
+ * one of them. Then we split what remains into
+ * state.alloc_size sized chunks and return all but one.
+ *
+ * We choose option (3).
+ */
+ if (chunk_size > pool->block_size &&
+ state.alloc_size < pool->block_size) {
+ assert(chunk_size % pool->block_size == 0);
+ /* We don't want to split giant chunks into tiny chunks. Instead,
+ * break anything bigger than a block into block-sized chunks and
+ * then break it down into bucket-sized chunks from there. Return
+ * all but the first block of the chunk to the block bucket.
+ */
+ const uint32_t block_bucket =
+ anv_state_pool_get_bucket(pool->block_size);
+ anv_free_list_push(&pool->buckets[block_bucket].free_list,
+ pool->block_pool.map,
+ chunk_offset + pool->block_size,
+ pool->block_size,
+ (chunk_size / pool->block_size) - 1);
+ chunk_size = pool->block_size;
+ }
+
+ assert(chunk_size % state.alloc_size == 0);
+ anv_free_list_push(&pool->buckets[bucket].free_list,
+ pool->block_pool.map,
+ chunk_offset + state.alloc_size,
+ state.alloc_size,
+ (chunk_size / state.alloc_size) - 1);
+
+ state.offset = chunk_offset;
+ goto done;
+ }
+ }
+
+ state.offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
+ &pool->block_pool,
+ state.alloc_size,
+ pool->block_size);
+
+done:
+ state.map = pool->block_pool.map + state.offset;
+ return state;
}
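+
+/* Worked example of the two-level split above (illustrative numbers,
+ * assuming pool->block_size = 16 KiB): satisfying a 64-byte request from a
+ * popped 64 KiB chunk first returns three 16 KiB blocks to the 16 KiB
+ * bucket, then returns 255 64-byte states from the remaining block to the
+ * requested bucket, and hands the first 64 bytes to the caller.
+ */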
struct anv_state
-anv_state_pool_alloc(struct anv_state_pool *pool, size_t size, size_t align)
+anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
{
- unsigned size_log2 = ilog2_round_up(size < align ? align : size);
- assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
- if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
- size_log2 = ANV_MIN_STATE_SIZE_LOG2;
- unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;
+ if (size == 0)
+ return ANV_STATE_NULL;
- struct anv_state state;
- state.alloc_size = 1 << size_log2;
- state.offset = anv_fixed_size_state_pool_alloc(&pool->buckets[bucket],
- pool->block_pool);
- state.map = pool->block_pool->map + state.offset;
+ struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align);
VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
return state;
}
+struct anv_state
+anv_state_pool_alloc_back(struct anv_state_pool *pool)
+{
+ struct anv_state state;
+ state.alloc_size = pool->block_size;
+
+ if (anv_free_list_pop(&pool->back_alloc_free_list,
+ &pool->block_pool.map, &state.offset)) {
+ assert(state.offset < 0);
+ goto done;
+ }
+
+ state.offset = anv_block_pool_alloc_back(&pool->block_pool,
+ pool->block_size);
+
+done:
+ state.map = pool->block_pool.map + state.offset;
+ VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size));
+ return state;
+}
+
+static void
+anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
+{
+ assert(util_is_power_of_two_or_zero(state.alloc_size));
+ unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);
+
+ if (state.offset < 0) {
+ assert(state.alloc_size == pool->block_size);
+ anv_free_list_push(&pool->back_alloc_free_list,
+ pool->block_pool.map, state.offset,
+ state.alloc_size, 1);
+ } else {
+ anv_free_list_push(&pool->buckets[bucket].free_list,
+ pool->block_pool.map, state.offset,
+ state.alloc_size, 1);
+ }
+}
+
void
anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
{
- assert(util_is_power_of_two(state.alloc_size));
- unsigned size_log2 = ilog2_round_up(state.alloc_size);
- assert(size_log2 >= ANV_MIN_STATE_SIZE_LOG2 &&
- size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
- unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;
+ if (state.alloc_size == 0)
+ return;
VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
- anv_fixed_size_state_pool_free(&pool->buckets[bucket],
- pool->block_pool, state.offset);
+ anv_state_pool_free_no_vg(pool, state);
}
-#define NULL_BLOCK 1
struct anv_state_stream_block {
+ struct anv_state block;
+
/* The next block */
struct anv_state_stream_block *next;
- /* The offset into the block pool at which this block starts */
- uint32_t offset;
-
#ifdef HAVE_VALGRIND
/* A pointer to the first user-allocated thing in this block. This is
* what valgrind sees as the start of the block.
*/
void
anv_state_stream_init(struct anv_state_stream *stream,
- struct anv_block_pool *block_pool)
+ struct anv_state_pool *state_pool,
+ uint32_t block_size)
{
- stream->block_pool = block_pool;
- stream->block = NULL;
+ stream->state_pool = state_pool;
+ stream->block_size = block_size;
- /* Ensure that next + whatever > end. This way the first call to
+ stream->block = ANV_STATE_NULL;
+
+ stream->block_list = NULL;
+
+ /* Ensure that next + whatever > block_size. This way the first call to
* state_stream_alloc fetches a new block.
*/
- stream->next = 1;
- stream->end = 0;
+ stream->next = block_size;
VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
}
void
anv_state_stream_finish(struct anv_state_stream *stream)
{
- VG(const uint32_t block_size = stream->block_pool->block_size);
-
- struct anv_state_stream_block *next = stream->block;
+ struct anv_state_stream_block *next = stream->block_list;
while (next != NULL) {
- VG(VALGRIND_MAKE_MEM_DEFINED(next, sizeof(*next)));
struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
- VG(VALGRIND_MAKE_MEM_UNDEFINED(next, block_size));
- anv_block_pool_free(stream->block_pool, sb.offset);
+ VG(VALGRIND_MAKE_MEM_UNDEFINED(next, stream->block_size));
+ anv_state_pool_free_no_vg(stream->state_pool, sb.block);
next = sb.next;
}
anv_state_stream_alloc(struct anv_state_stream *stream,
uint32_t size, uint32_t alignment)
{
- struct anv_state_stream_block *sb = stream->block;
+ if (size == 0)
+ return ANV_STATE_NULL;
- struct anv_state state;
+ assert(alignment <= PAGE_SIZE);
+
+ uint32_t offset = align_u32(stream->next, alignment);
+ if (offset + size > stream->block.alloc_size) {
+ uint32_t block_size = stream->block_size;
+ if (block_size < size)
+ block_size = round_to_power_of_two(size);
+
+ stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
+ block_size, PAGE_SIZE);
- state.offset = align_u32(stream->next, alignment);
- if (state.offset + size > stream->end) {
- uint32_t block = anv_block_pool_alloc(stream->block_pool);
- sb = stream->block_pool->map + block;
+ struct anv_state_stream_block *sb = stream->block.map;
+ VG_NOACCESS_WRITE(&sb->block, stream->block);
+ VG_NOACCESS_WRITE(&sb->next, stream->block_list);
+ stream->block_list = sb;
+ VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, NULL));
- VG(VALGRIND_MAKE_MEM_UNDEFINED(sb, sizeof(*sb)));
- sb->next = stream->block;
- sb->offset = block;
- VG(sb->_vg_ptr = NULL);
- VG(VALGRIND_MAKE_MEM_NOACCESS(sb, stream->block_pool->block_size));
+ VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, stream->block_size));
- stream->block = sb;
- stream->start = block;
- stream->next = block + sizeof(*sb);
- stream->end = block + stream->block_pool->block_size;
+ /* Reset back to the start plus space for the header */
+ stream->next = sizeof(*sb);
- state.offset = align_u32(stream->next, alignment);
- assert(state.offset + size <= stream->end);
+ offset = align_u32(stream->next, alignment);
+ assert(offset + size <= stream->block.alloc_size);
}
- assert(state.offset > stream->start);
- state.map = (void *)sb + (state.offset - stream->start);
+ struct anv_state state = stream->block;
+ state.offset += offset;
state.alloc_size = size;
+ state.map += offset;
+
+ stream->next = offset + size;
#ifdef HAVE_VALGRIND
+ struct anv_state_stream_block *sb = stream->block_list;
void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
if (vg_ptr == NULL) {
vg_ptr = state.map;
}
#endif
- stream->next = state.offset + size;
-
return state;
}
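+
+/* Hypothetical usage sketch (not part of this change): a state stream is a
+ * bump allocator over state-pool blocks. The pool pointer and sizes below
+ * are placeholders.
+ */
+#if 0
+ struct anv_state_stream stream;
+ anv_state_stream_init(&stream, state_pool, 16384);
+ struct anv_state a = anv_state_stream_alloc(&stream, 64, 64);
+ struct anv_state b = anv_state_stream_alloc(&stream, 1024, 256);
+ /* ... fill a.map and b.map ... */
+ anv_state_stream_finish(&stream); /* returns every block to the pool */
+#endif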
};
void
-anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device)
+anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
+ uint64_t bo_flags)
{
pool->device = device;
+ pool->bo_flags = bo_flags;
memset(pool->free_list, 0, sizeof(pool->free_list));
VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
if (anv_ptr_free_list_pop(&pool->free_list[bucket], &next_free_void)) {
struct bo_pool_bo_link *next_free = next_free_void;
*bo = VG_NOACCESS_READ(&next_free->bo);
+ assert(bo->gem_handle);
assert(bo->map == next_free);
assert(size <= bo->size);
if (result != VK_SUCCESS)
return result;
+ new_bo.flags = pool->bo_flags;
+
assert(new_bo.size == pow2_size);
new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0);
- if (new_bo.map == NULL) {
+ if (new_bo.map == MAP_FAILED) {
anv_gem_close(pool->device, new_bo.gem_handle);
return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
}
struct bo_pool_bo_link *link = bo.map;
VG_NOACCESS_WRITE(&link->bo, bo);
- assert(util_is_power_of_two(bo.size));
+ assert(util_is_power_of_two_or_zero(bo.size));
const unsigned size_log2 = ilog2_round_up(bo.size);
const unsigned bucket = size_log2 - 12;
assert(bucket < ARRAY_SIZE(pool->free_list));
{
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
for (unsigned i = 0; i < 16; i++) {
- struct anv_bo *bo = &pool->bos[i][s];
- if (bo->size > 0)
- anv_gem_close(device, bo->gem_handle);
+ struct anv_scratch_bo *bo = &pool->bos[i][s];
+ if (bo->exists > 0)
+ anv_gem_close(device, bo->bo.gem_handle);
}
}
}
unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
assert(scratch_size_log2 < 16);
- struct anv_bo *bo = &pool->bos[scratch_size_log2][stage];
+ struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage];
- /* From now on, we go into a critical section. In order to remain
- * thread-safe, we use the bo size as a lock. A value of 0 means we don't
- * have a valid BO yet. A value of 1 means locked. A value greater than 1
- * means we have a bo of the given size.
- */
+ /* We can use "exists" to shortcut and ignore the critical section */
+ if (bo->exists)
+ return &bo->bo;
+
+ pthread_mutex_lock(&device->mutex);
- if (bo->size > 1)
- return bo;
+ __sync_synchronize();
+ if (bo->exists) {
+ pthread_mutex_unlock(&device->mutex);
+ return &bo->bo;
+ }
- uint64_t size = __sync_val_compare_and_swap(&bo->size, 0, 1);
- if (size == 0) {
- /* We own the lock. Allocate a buffer */
+ const struct anv_physical_device *physical_device =
+ &device->instance->physicalDevice;
+ const struct gen_device_info *devinfo = &physical_device->info;
- const struct anv_physical_device *physical_device =
- &device->instance->physicalDevice;
- const struct gen_device_info *devinfo = &physical_device->info;
+ const unsigned subslices = MAX2(physical_device->subslice_total, 1);
+ unsigned scratch_ids_per_subslice;
+ if (devinfo->is_haswell) {
/* WaCSScratchSize:hsw
*
* Haswell's scratch space address calculation appears to be sparse
- * rather than tightly packed. The Thread ID has bits indicating which
- * subslice, EU within a subslice, and thread within an EU it is.
- * There's a maximum of two slices and two subslices, so these can be
- * stored with a single bit. Even though there are only 10 EUs per
- * subslice, this is stored in 4 bits, so there's an effective maximum
- * value of 16 EUs. Similarly, although there are only 7 threads per EU,
- * this is stored in a 3 bit number, giving an effective maximum value
- * of 8 threads per EU.
+ * rather than tightly packed. The Thread ID has bits indicating
+ * which subslice, EU within a subslice, and thread within an EU it
+ * is. There's a maximum of two slices and two subslices, so these
+ * can be stored with a single bit. Even though there are only 10 EUs
+ * per subslice, this is stored in 4 bits, so there's an effective
+ * maximum value of 16 EUs. Similarly, although there are only 7
+ * threads per EU, this is stored in a 3 bit number, giving an
+ * effective maximum value of 8 threads per EU.
*
* This means that we need to use 16 * 8 instead of 10 * 7 for the
* number of threads per subslice.
*/
- const unsigned subslices = MAX2(physical_device->subslice_total, 1);
- const unsigned scratch_ids_per_subslice =
- device->info.is_haswell ? 16 * 8 : devinfo->max_cs_threads;
+ scratch_ids_per_subslice = 16 * 8;
+ } else if (devinfo->is_cherryview) {
+ /* Cherryview devices have either 6 or 8 EUs per subslice, and each EU
+ * has 7 threads. The 6 EU devices appear to calculate thread IDs as if
+ * they had 8 EUs.
+ */
+ scratch_ids_per_subslice = 8 * 7;
+ } else {
+ scratch_ids_per_subslice = devinfo->max_cs_threads;
+ }
+
+ uint32_t max_threads[] = {
+ [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
+ [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
+ [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
+ [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
+ [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
+ [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslices,
+ };
+
+ uint32_t size = per_thread_scratch * max_threads[stage];
+
+ anv_bo_init_new(&bo->bo, device, size);
+
+ /* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
+ * are still relative to the general state base address. When we emit
+ * STATE_BASE_ADDRESS, we set general state base address to 0 and the size
+ * to the maximum (1 page under 4GB). This allows us to just place the
+ * scratch buffers anywhere we wish in the bottom 32 bits of address space
+ * and just set the scratch base pointer in 3DSTATE_*S using a relocation.
+ * However, in order to do so, we need to ensure that the kernel does not
+ * place the scratch BO above the 32-bit boundary.
+ *
+ * NOTE: Technically, it can't go "anywhere" because the top page is off
+ * limits. However, when EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set, the
+ * kernel allocates space using
+ *
+ * end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
+ *
+ * so nothing will ever touch the top page.
+ */
+ assert(!(bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS));
- uint32_t max_threads[] = {
- [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
- [MESA_SHADER_TESS_CTRL] = devinfo->max_hs_threads,
- [MESA_SHADER_TESS_EVAL] = devinfo->max_ds_threads,
- [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
- [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
- [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslices,
- };
+ if (device->instance->physicalDevice.has_exec_async)
+ bo->bo.flags |= EXEC_OBJECT_ASYNC;
- size = per_thread_scratch * max_threads[stage];
+ /* Set exists last because it may be read by other threads */
+ __sync_synchronize();
+ bo->exists = true;
- struct anv_bo new_bo;
- anv_bo_init_new(&new_bo, device, size);
+ pthread_mutex_unlock(&device->mutex);
- bo->gem_handle = new_bo.gem_handle;
+ return &bo->bo;
+}
- /* Set the size last because we use it as a lock */
- __sync_synchronize();
- bo->size = size;
+struct anv_cached_bo {
+ struct anv_bo bo;
- futex_wake((uint32_t *)&bo->size, INT_MAX);
- } else {
- /* Someone else got here first */
- while (bo->size == 1)
- futex_wait((uint32_t *)&bo->size, 1);
+ uint32_t refcount;
+};
+
+VkResult
+anv_bo_cache_init(struct anv_bo_cache *cache)
+{
+ cache->bo_map = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ if (!cache->bo_map)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ if (pthread_mutex_init(&cache->mutex, NULL)) {
+ _mesa_hash_table_destroy(cache->bo_map, NULL);
+ return vk_errorf(NULL, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
+ "pthread_mutex_init failed: %m");
}
+ return VK_SUCCESS;
+}
+
+void
+anv_bo_cache_finish(struct anv_bo_cache *cache)
+{
+ _mesa_hash_table_destroy(cache->bo_map, NULL);
+ pthread_mutex_destroy(&cache->mutex);
+}
+
+static struct anv_cached_bo *
+anv_bo_cache_lookup_locked(struct anv_bo_cache *cache, uint32_t gem_handle)
+{
+ struct hash_entry *entry =
+ _mesa_hash_table_search(cache->bo_map,
+ (const void *)(uintptr_t)gem_handle);
+ if (!entry)
+ return NULL;
+
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)entry->data;
+ assert(bo->bo.gem_handle == gem_handle);
+
return bo;
}
+
+UNUSED static struct anv_bo *
+anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
+{
+ pthread_mutex_lock(&cache->mutex);
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ return bo ? &bo->bo : NULL;
+}
+
+#define ANV_BO_CACHE_SUPPORTED_FLAGS \
+ (EXEC_OBJECT_WRITE | \
+ EXEC_OBJECT_ASYNC | \
+ EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
+ EXEC_OBJECT_PINNED)
+
+VkResult
+anv_bo_cache_alloc(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ uint64_t size, uint64_t bo_flags,
+ struct anv_bo **bo_out)
+{
+ assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+
+ struct anv_cached_bo *bo =
+ vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ bo->refcount = 1;
+
+ /* The kernel is going to give us whole pages anyway */
+ size = align_u64(size, 4096);
+
+ VkResult result = anv_bo_init_new(&bo->bo, device, size);
+ if (result != VK_SUCCESS) {
+ vk_free(&device->alloc, bo);
+ return result;
+ }
+
+ bo->bo.flags = bo_flags;
+
+ if (!anv_vma_alloc(device, &bo->bo)) {
+ anv_gem_close(device, bo->bo.gem_handle);
+ vk_free(&device->alloc, bo);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+
+ assert(bo->bo.gem_handle);
+
+ pthread_mutex_lock(&cache->mutex);
+
+ _mesa_hash_table_insert(cache->bo_map,
+ (void *)(uintptr_t)bo->bo.gem_handle, bo);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_bo_cache_import(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ int fd, uint64_t bo_flags,
+ struct anv_bo **bo_out)
+{
+ assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+
+ pthread_mutex_lock(&cache->mutex);
+
+ uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
+ if (!gem_handle) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ }
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+ if (bo) {
+ /* We have to be careful how we combine flags so that it makes sense.
+ * Really, though, if we get to this case and it actually matters, the
+ * client has imported a BO twice in different ways and they get what
+ * they have coming.
+ */
+ uint64_t new_flags = 0;
+ new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_WRITE;
+ new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_ASYNC;
+ new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_PINNED;
+
+ /* It's theoretically possible for a BO to get imported such that it's
+ * both pinned and not pinned. The only way this can happen is if it
+ * gets imported as both a semaphore and a memory object and that would
+ * be an application error. Just fail out in that case.
+ */
+ if ((bo->bo.flags & EXEC_OBJECT_PINNED) !=
+ (bo_flags & EXEC_OBJECT_PINNED)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported two different ways");
+ }
+
+ /* It's also theoretically possible that someone could export a BO from
+ * one heap and import it into another or to import the same BO into two
+ * different heaps. If this happens, we could potentially end up both
+ * allowing and disallowing 48-bit addresses. There's not much we can
+ * do about it if we're pinning so we just throw an error and hope no
+ * app is actually that stupid.
+ */
+ if ((new_flags & EXEC_OBJECT_PINNED) &&
+ (bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
+ (bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported on two different heaps");
+ }
+
+ bo->bo.flags = new_flags;
+
+ __sync_fetch_and_add(&bo->refcount, 1);
+ } else {
+ off_t size = lseek(fd, 0, SEEK_END);
+ if (size == (off_t)-1) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ }
+
+ bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ bo->refcount = 1;
+
+ anv_bo_init(&bo->bo, gem_handle, size);
+ bo->bo.flags = bo_flags;
+
+ if (!anv_vma_alloc(device, &bo->bo)) {
+ anv_gem_close(device, bo->bo.gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ vk_free(&device->alloc, bo);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+
+ _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+ }
+
+ pthread_mutex_unlock(&cache->mutex);
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_bo_cache_export(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in, int *fd_out)
+{
+ assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+
+ int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
+ if (fd < 0)
+ return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+
+ *fd_out = fd;
+
+ return VK_SUCCESS;
+}
+
+static bool
+atomic_dec_not_one(uint32_t *counter)
+{
+ uint32_t old, val;
+
+ val = *counter;
+ while (1) {
+ if (val == 1)
+ return false;
+
+ old = __sync_val_compare_and_swap(counter, val, val - 1);
+ if (old == val)
+ return true;
+
+ val = old;
+ }
+}
+
+void
+anv_bo_cache_release(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in)
+{
+ assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+
+ /* Try to decrement the counter but don't go below one. If this succeeds
+ * then the refcount has been decremented and we are not the last
+ * reference.
+ */
+ if (atomic_dec_not_one(&bo->refcount))
+ return;
+
+ pthread_mutex_lock(&cache->mutex);
+
+ /* We are probably the last reference since our attempt to decrement above
+ * failed. However, we can't actually know until we are inside the mutex.
+ * Otherwise, someone could import the BO between the decrement and our
+ * taking the mutex.
+ */
+ if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
+ /* Turns out we're not the last reference. Unlock and bail. */
+ pthread_mutex_unlock(&cache->mutex);
+ return;
+ }
+
+ struct hash_entry *entry =
+ _mesa_hash_table_search(cache->bo_map,
+ (const void *)(uintptr_t)bo->bo.gem_handle);
+ assert(entry);
+ _mesa_hash_table_remove(cache->bo_map, entry);
+
+ if (bo->bo.map)
+ anv_gem_munmap(bo->bo.map, bo->bo.size);
+
+ anv_vma_free(device, &bo->bo);
+
+ anv_gem_close(device, bo->bo.gem_handle);
+
+ /* Don't unlock until we've actually closed the BO. The whole point of
+ * the BO cache is to ensure that we correctly handle races with creating
+ * and releasing GEM handles and we don't want to let someone import the BO
+ * again between mutex unlock and closing the GEM handle.
+ */
+ pthread_mutex_unlock(&cache->mutex);
+
+ vk_free(&device->alloc, bo);
+}
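+
+/* Hypothetical lifecycle sketch (not part of this change) tying the cache
+ * entry points together; device, cache, flags and sizes are placeholders.
+ * Importing a handle that is already in the cache only bumps its refcount,
+ * so each reference is released exactly once.
+ */
+#if 0
+ struct anv_bo *bo;
+ anv_bo_cache_alloc(device, cache, 4096, EXEC_OBJECT_WRITE, &bo);
+
+ int fd;
+ anv_bo_cache_export(device, cache, bo, &fd);
+
+ struct anv_bo *imported;
+ anv_bo_cache_import(device, cache, fd, EXEC_OBJECT_WRITE, &imported);
+ /* imported == bo and the refcount is now 2 */
+
+ anv_bo_cache_release(device, cache, imported); /* refcount 2 -> 1 */
+ anv_bo_cache_release(device, cache, bo);       /* closes the GEM handle */
+#endif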