#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
-#include <values.h>
+#include <limits.h>
#include <assert.h>
#include <linux/futex.h>
#include <linux/memfd.h>
#include "anv_private.h"
+#include "util/hash_table.h"
+
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({ \
VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr))); \
static uint32_t
anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state);
-void
+VkResult
anv_block_pool_init(struct anv_block_pool *pool,
struct anv_device *device, uint32_t block_size)
{
+ VkResult result;
+
assert(util_is_power_of_two(block_size));
pool->device = device;
- pool->bo.gem_handle = 0;
- pool->bo.offset = 0;
- pool->bo.size = 0;
- pool->bo.is_winsys_bo = false;
+ anv_bo_init(&pool->bo, 0, 0);
pool->block_size = block_size;
pool->free_list = ANV_FREE_LIST_EMPTY;
pool->back_free_list = ANV_FREE_LIST_EMPTY;
pool->fd = memfd_create("block pool", MFD_CLOEXEC);
if (pool->fd == -1)
- return;
+ return vk_error(VK_ERROR_INITIALIZATION_FAILED);
/* Just make it 2GB up-front. The Linux kernel won't actually back it
* with pages until we either map and fault on one of them or we use
* userptr and send a chunk of it off to the GPU.
*/
- if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1)
- return;
+ if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_fd;
+ }
- u_vector_init(&pool->mmap_cleanups,
- round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128);
+ if (!u_vector_init(&pool->mmap_cleanups,
+ round_to_power_of_two(sizeof(struct anv_mmap_cleanup)),
+ 128)) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_fd;
+ }
pool->state.next = 0;
pool->state.end = 0;
/* Immediately grow the pool so we'll have a backing bo. */
pool->state.end = anv_block_pool_grow(pool, &pool->state);
+
+ return VK_SUCCESS;
+
+ fail_fd:
+ close(pool->fd);
+
+ return result;
}
void
static uint32_t
anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
{
- size_t size;
+ uint32_t size;
void *map;
uint32_t gem_handle;
struct anv_mmap_cleanup *cleanup;
assert(state == &pool->state || back_used > 0);
- size_t old_size = pool->bo.size;
+ uint32_t old_size = pool->bo.size;
if (old_size != 0 &&
back_used * 2 <= pool->center_bo_offset &&
* values back into pool. */
pool->map = map + center_bo_offset;
pool->center_bo_offset = center_bo_offset;
- pool->bo.gem_handle = gem_handle;
- pool->bo.size = size;
+
+ /* For block pool BOs we have to be a bit careful about where we place them
+ * in the GTT. There are two documented workarounds for state base address
+ * placement: Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
+ * which state that those two base addresses do not support 48-bit
+ * addresses and need to be placed in the bottom 32-bit range.
+ * Unfortunately, this is not quite accurate.
+ *
+ * The real problem is that we always set the size of our state pools in
+ * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
+ * likely significantly smaller. We do this because we do not know at the
+ * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
+ * the pool during command buffer building so we don't actually have a
+ * valid final size. If the address + size, as seen by STATE_BASE_ADDRESS,
+ * overflows 48 bits, the GPU appears to treat all accesses to the buffer
+ * as being out of bounds and returns zero. For dynamic state, this
+ * usually just leads to rendering corruptions, but shaders that are all
+ * zero hang the GPU immediately.
+ *
+ * The easiest solution is to do exactly what the bogus workarounds say:
+ * restrict these buffers to 32-bit addresses. We could also pin the
+ * BO to some particular location of our choosing, but that's significantly
+ * more work than just not setting a flag. So, we explicitly DO NOT set
+ * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
+ * hard work for us.
+ */
+ anv_bo_init(&pool->bo, gem_handle, size);
pool->bo.map = map;
- pool->bo.index = 0;
+
+ if (pool->device->instance->physicalDevice.has_exec_async)
+ pool->bo.flags |= EXEC_OBJECT_ASYNC;
done:
pthread_mutex_unlock(&pool->device->mutex);
}
}
-static void
-anv_fixed_size_state_pool_init(struct anv_fixed_size_state_pool *pool,
- size_t state_size)
+void
+anv_state_pool_init(struct anv_state_pool *pool,
+ struct anv_block_pool *block_pool)
{
- /* At least a cache line and must divide the block size. */
- assert(state_size >= 64 && util_is_power_of_two(state_size));
+ pool->block_pool = block_pool;
+ for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
+ pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
+ pool->buckets[i].block.next = 0;
+ pool->buckets[i].block.end = 0;
+ }
+ VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
+}
- pool->state_size = state_size;
- pool->free_list = ANV_FREE_LIST_EMPTY;
- pool->block.next = 0;
- pool->block.end = 0;
+void
+anv_state_pool_finish(struct anv_state_pool *pool)
+{
+ VG(VALGRIND_DESTROY_MEMPOOL(pool));
}
static uint32_t
-anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool,
- struct anv_block_pool *block_pool)
+anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
+ struct anv_block_pool *block_pool,
+ uint32_t state_size)
{
- int32_t offset;
struct anv_block_state block, old, new;
+ uint32_t offset;
- /* Try free list first. */
- if (anv_free_list_pop(&pool->free_list, &block_pool->map, &offset)) {
- assert(offset >= 0);
- return offset;
- }
-
- /* If free list was empty (or somebody raced us and took the items) we
- * allocate a new item from the end of the block */
restart:
- block.u64 = __sync_fetch_and_add(&pool->block.u64, pool->state_size);
+ block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
if (block.next < block.end) {
return block.next;
} else if (block.next == block.end) {
offset = anv_block_pool_alloc(block_pool);
- new.next = offset + pool->state_size;
+ new.next = offset + state_size;
new.end = offset + block_pool->block_size;
old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
if (old.next != block.next)
}
}
-static void
-anv_fixed_size_state_pool_free(struct anv_fixed_size_state_pool *pool,
- struct anv_block_pool *block_pool,
- uint32_t offset)
-{
- anv_free_list_push(&pool->free_list, block_pool->map, offset);
-}
-
-void
-anv_state_pool_init(struct anv_state_pool *pool,
- struct anv_block_pool *block_pool)
-{
- pool->block_pool = block_pool;
- for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
- size_t size = 1 << (ANV_MIN_STATE_SIZE_LOG2 + i);
- anv_fixed_size_state_pool_init(&pool->buckets[i], size);
- }
- VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
-}
-
-void
-anv_state_pool_finish(struct anv_state_pool *pool)
-{
- VG(VALGRIND_DESTROY_MEMPOOL(pool));
-}
-
-struct anv_state
-anv_state_pool_alloc(struct anv_state_pool *pool, size_t size, size_t align)
+static struct anv_state
+anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
+ uint32_t size, uint32_t align)
{
unsigned size_log2 = ilog2_round_up(size < align ? align : size);
assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
struct anv_state state;
state.alloc_size = 1 << size_log2;
- state.offset = anv_fixed_size_state_pool_alloc(&pool->buckets[bucket],
- pool->block_pool);
+
+ /* Try free list first. */
+ if (anv_free_list_pop(&pool->buckets[bucket].free_list,
+ &pool->block_pool->map, &state.offset)) {
+ assert(state.offset >= 0);
+ goto done;
+ }
+
+ state.offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
+ pool->block_pool,
+ state.alloc_size);
+
+done:
state.map = pool->block_pool->map + state.offset;
+ return state;
+}
+
+struct anv_state
+anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
+{
+ if (size == 0)
+ return ANV_STATE_NULL;
+
+ struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align);
VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
return state;
}
-void
-anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
+static void
+anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
{
assert(util_is_power_of_two(state.alloc_size));
unsigned size_log2 = ilog2_round_up(state.alloc_size);
size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;
+ anv_free_list_push(&pool->buckets[bucket].free_list,
+ pool->block_pool->map, state.offset);
+}
+
+void
+anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
+{
+ if (state.alloc_size == 0)
+ return;
+
VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
- anv_fixed_size_state_pool_free(&pool->buckets[bucket],
- pool->block_pool, state.offset);
+ anv_state_pool_free_no_vg(pool, state);
}
-#define NULL_BLOCK 1
struct anv_state_stream_block {
+ struct anv_state block;
+
/* The next block */
struct anv_state_stream_block *next;
- /* The offset into the block pool at which this block starts */
- uint32_t offset;
-
#ifdef HAVE_VALGRIND
/* A pointer to the first user-allocated thing in this block. This is
* what valgrind sees as the start of the block.
*/
void
anv_state_stream_init(struct anv_state_stream *stream,
- struct anv_block_pool *block_pool)
+ struct anv_state_pool *state_pool,
+ uint32_t block_size)
{
- stream->block_pool = block_pool;
- stream->block = NULL;
+ stream->state_pool = state_pool;
+ stream->block_size = block_size;
+
+ stream->block = ANV_STATE_NULL;
- /* Ensure that next + whatever > end. This way the first call to
+ stream->block_list = NULL;
+
+ /* Ensure that next + whatever > block_size. This way the first call to
* state_stream_alloc fetches a new block.
*/
- stream->next = 1;
- stream->end = 0;
+ stream->next = block_size;
VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
}
void
anv_state_stream_finish(struct anv_state_stream *stream)
{
- VG(const uint32_t block_size = stream->block_pool->block_size);
-
- struct anv_state_stream_block *next = stream->block;
+ struct anv_state_stream_block *next = stream->block_list;
while (next != NULL) {
- VG(VALGRIND_MAKE_MEM_DEFINED(next, sizeof(*next)));
struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
- VG(VALGRIND_MAKE_MEM_UNDEFINED(next, block_size));
- anv_block_pool_free(stream->block_pool, sb.offset);
+ VG(VALGRIND_MAKE_MEM_UNDEFINED(next, stream->block_size));
+ anv_state_pool_free_no_vg(stream->state_pool, sb.block);
next = sb.next;
}
anv_state_stream_alloc(struct anv_state_stream *stream,
uint32_t size, uint32_t alignment)
{
- struct anv_state_stream_block *sb = stream->block;
+ if (size == 0)
+ return ANV_STATE_NULL;
- struct anv_state state;
+ assert(alignment <= PAGE_SIZE);
- state.offset = align_u32(stream->next, alignment);
- if (state.offset + size > stream->end) {
- uint32_t block = anv_block_pool_alloc(stream->block_pool);
- sb = stream->block_pool->map + block;
+ uint32_t offset = align_u32(stream->next, alignment);
+ if (offset + size > stream->block_size) {
+ stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
+ stream->block_size,
+ PAGE_SIZE);
- VG(VALGRIND_MAKE_MEM_UNDEFINED(sb, sizeof(*sb)));
- sb->next = stream->block;
- sb->offset = block;
- VG(sb->_vg_ptr = NULL);
- VG(VALGRIND_MAKE_MEM_NOACCESS(sb, stream->block_pool->block_size));
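+ /* Each new block begins with an anv_state_stream_block header so that
+ * anv_state_stream_finish() can walk the list and free every block.
+ */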
+ struct anv_state_stream_block *sb = stream->block.map;
+ VG_NOACCESS_WRITE(&sb->block, stream->block);
+ VG_NOACCESS_WRITE(&sb->next, stream->block_list);
+ stream->block_list = sb;
+ VG_NOACCESS_WRITE(&sb->_vg_ptr, NULL);
- stream->block = sb;
- stream->start = block;
- stream->next = block + sizeof(*sb);
- stream->end = block + stream->block_pool->block_size;
+ VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, stream->block_size));
- state.offset = align_u32(stream->next, alignment);
- assert(state.offset + size <= stream->end);
+ /* Reset back to the start plus space for the header */
+ stream->next = sizeof(*sb);
+
+ offset = align_u32(stream->next, alignment);
+ assert(offset + size <= stream->block_size);
}
- assert(state.offset > stream->start);
- state.map = (void *)sb + (state.offset - stream->start);
+ struct anv_state state = stream->block;
+ state.offset += offset;
state.alloc_size = size;
+ state.map += offset;
+
+ stream->next = offset + size;
#ifdef HAVE_VALGRIND
+ struct anv_state_stream_block *sb = stream->block_list;
void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
if (vg_ptr == NULL) {
vg_ptr = state.map;
}
#endif
- stream->next = state.offset + size;
-
return state;
}
assert(new_bo.size == pow2_size);
new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0);
- if (new_bo.map == NULL) {
+ if (new_bo.map == MAP_FAILED) {
anv_gem_close(pool->device, new_bo.gem_handle);
return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
}
{
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
for (unsigned i = 0; i < 16; i++) {
- struct anv_bo *bo = &pool->bos[i][s];
- if (bo->size > 0)
- anv_gem_close(device, bo->gem_handle);
+ struct anv_scratch_bo *bo = &pool->bos[i][s];
+ if (bo->exists)
+ anv_gem_close(device, bo->bo.gem_handle);
}
}
}
unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
assert(scratch_size_log2 < 16);
- struct anv_bo *bo = &pool->bos[scratch_size_log2][stage];
+ struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage];
+
+ /* We can use "exists" to shortcut and ignore the critical section */
+ if (bo->exists)
+ return &bo->bo;
+
+ pthread_mutex_lock(&device->mutex);
+
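+ /* We now hold the mutex; issue a barrier and re-check in case another
+ * thread created the BO while we were waiting for the lock.
+ */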
+ __sync_synchronize();
+ if (bo->exists) {
+ pthread_mutex_unlock(&device->mutex);
+ return &bo->bo;
+ }
+
+ const struct anv_physical_device *physical_device =
+ &device->instance->physicalDevice;
+ const struct gen_device_info *devinfo = &physical_device->info;
- /* From now on, we go into a critical section. In order to remain
- * thread-safe, we use the bo size as a lock. A value of 0 means we don't
- * have a valid BO yet. A value of 1 means locked. A value greater than 1
- * means we have a bo of the given size.
+ /* WaCSScratchSize:hsw
+ *
+ * Haswell's scratch space address calculation appears to be sparse
+ * rather than tightly packed. The Thread ID has bits indicating which
+ * subslice, EU within a subslice, and thread within an EU it is.
+ * There's a maximum of two slices and two subslices, so these can be
+ * stored with a single bit. Even though there are only 10 EUs per
+ * subslice, this is stored in 4 bits, so there's an effective maximum
+ * value of 16 EUs. Similarly, although there are only 7 threads per EU,
+ * this is stored in a 3 bit number, giving an effective maximum value
+ * of 8 threads per EU.
+ *
+ * This means that we need to use 16 * 8 instead of 10 * 7 for the
+ * number of threads per subslice.
+ */
+ const unsigned subslices = MAX2(physical_device->subslice_total, 1);
+ const unsigned scratch_ids_per_subslice =
+ devinfo->is_haswell ? 16 * 8 : devinfo->max_cs_threads;
+
+ uint32_t max_threads[] = {
+ [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
+ [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
+ [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
+ [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
+ [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
+ [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslices,
+ };
+
+ uint32_t size = per_thread_scratch * max_threads[stage];
+
+ anv_bo_init_new(&bo->bo, device, size);
+
+ /* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
+ * are still relative to the general state base address. When we emit
+ * STATE_BASE_ADDRESS, we set general state base address to 0 and the size
+ * to the maximum (1 page under 4GB). This allows us to just place the
+ * scratch buffers anywhere we wish in the bottom 32 bits of address space
+ * and just set the scratch base pointer in 3DSTATE_*S using a relocation.
+ * However, in order to do so, we need to ensure that the kernel does not
+ * place the scratch BO above the 32-bit boundary.
+ *
+ * NOTE: Technically, it can't go "anywhere" because the top page is off
+ * limits. However, when EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set, the
+ * kernel allocates space using
+ *
+ * end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
+ *
+ * so nothing will ever touch the top page.
*/
+ bo->bo.flags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
- if (bo->size > 1)
- return bo;
-
- uint64_t size = __sync_val_compare_and_swap(&bo->size, 0, 1);
- if (size == 0) {
- /* We own the lock. Allocate a buffer */
-
- const struct anv_physical_device *physical_device =
- &device->instance->physicalDevice;
- const struct gen_device_info *devinfo = &physical_device->info;
-
- /* WaCSScratchSize:hsw
- *
- * Haswell's scratch space address calculation appears to be sparse
- * rather than tightly packed. The Thread ID has bits indicating which
- * subslice, EU within a subslice, and thread within an EU it is.
- * There's a maximum of two slices and two subslices, so these can be
- * stored with a single bit. Even though there are only 10 EUs per
- * subslice, this is stored in 4 bits, so there's an effective maximum
- * value of 16 EUs. Similarly, although there are only 7 threads per EU,
- * this is stored in a 3 bit number, giving an effective maximum value
- * of 8 threads per EU.
- *
- * This means that we need to use 16 * 8 instead of 10 * 7 for the
- * number of threads per subslice.
- */
- const unsigned subslices = MAX2(physical_device->subslice_total, 1);
- const unsigned scratch_ids_per_subslice =
- device->info.is_haswell ? 16 * 8 : devinfo->max_cs_threads;
+ /* Set exists last because other threads may read it outside the lock */
+ __sync_synchronize();
+ bo->exists = true;
- uint32_t max_threads[] = {
- [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
- [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
- [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
- [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
- [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
- [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslices,
- };
+ pthread_mutex_unlock(&device->mutex);
- size = per_thread_scratch * max_threads[stage];
+ return &bo->bo;
+}
- struct anv_bo new_bo;
- anv_bo_init_new(&new_bo, device, size);
+struct anv_cached_bo {
+ struct anv_bo bo;
- bo->gem_handle = new_bo.gem_handle;
+ uint32_t refcount;
+};
- /* Set the size last because we use it as a lock */
- __sync_synchronize();
- bo->size = size;
+VkResult
+anv_bo_cache_init(struct anv_bo_cache *cache)
+{
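+ /* The map is keyed on the GEM handle so that repeated imports of the
+ * same handle resolve to a single anv_bo.
+ */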
+ cache->bo_map = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ if (!cache->bo_map)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ if (pthread_mutex_init(&cache->mutex, NULL)) {
+ _mesa_hash_table_destroy(cache->bo_map, NULL);
+ return vk_errorf(VK_ERROR_OUT_OF_HOST_MEMORY,
+ "pthread_mutex_init failed: %m");
+ }
+
+ return VK_SUCCESS;
+}
+
+void
+anv_bo_cache_finish(struct anv_bo_cache *cache)
+{
+ _mesa_hash_table_destroy(cache->bo_map, NULL);
+ pthread_mutex_destroy(&cache->mutex);
+}
+
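+/* Must be called with cache->mutex held. */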
+static struct anv_cached_bo *
+anv_bo_cache_lookup_locked(struct anv_bo_cache *cache, uint32_t gem_handle)
+{
+ struct hash_entry *entry =
+ _mesa_hash_table_search(cache->bo_map,
+ (const void *)(uintptr_t)gem_handle);
+ if (!entry)
+ return NULL;
+
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)entry->data;
+ assert(bo->bo.gem_handle == gem_handle);
+
+ return bo;
+}
+
+static struct anv_bo *
+anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
+{
+ pthread_mutex_lock(&cache->mutex);
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+
+ pthread_mutex_unlock(&cache->mutex);
- futex_wake((uint32_t *)&bo->size, INT_MAX);
+ return bo ? &bo->bo : NULL;
+}
+
+VkResult
+anv_bo_cache_alloc(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ uint64_t size, struct anv_bo **bo_out)
+{
+ struct anv_cached_bo *bo =
+ vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ bo->refcount = 1;
+
+ /* The kernel is going to give us whole pages anyway */
+ size = align_u64(size, 4096);
+
+ VkResult result = anv_bo_init_new(&bo->bo, device, size);
+ if (result != VK_SUCCESS) {
+ vk_free(&device->alloc, bo);
+ return result;
+ }
+
+ assert(bo->bo.gem_handle);
+
+ pthread_mutex_lock(&cache->mutex);
+
+ _mesa_hash_table_insert(cache->bo_map,
+ (void *)(uintptr_t)bo->bo.gem_handle, bo);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_bo_cache_import(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ int fd, uint64_t size, struct anv_bo **bo_out)
+{
+ pthread_mutex_lock(&cache->mutex);
+
+ /* The kernel is going to give us whole pages anyway */
+ size = align_u64(size, 4096);
+
+ uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
+ if (!gem_handle) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
+ }
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+ if (bo) {
+ if (bo->bo.size != size) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
+ }
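+ /* The BO is already in the cache; just take another reference. */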
+ __sync_fetch_and_add(&bo->refcount, 1);
} else {
- /* Someone else got here first */
- while (bo->size == 1)
- futex_wait((uint32_t *)&bo->size, 1);
+ /* For security purposes, we reject BO imports where the size does not
+ * match exactly. This prevents a malicious client from passing a
+ * buffer to a trusted client, lying about the size, and telling the
+ * trusted client to try and texture from an image that goes
+ * out-of-bounds. This sort of thing could lead to GPU hangs or worse
+ * in the trusted client. The trusted client can protect itself against
+ * this sort of attack but only if it can trust the buffer size.
+ */
+ off_t import_size = lseek(fd, 0, SEEK_END);
+ if (import_size == (off_t)-1 || import_size != size) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
+ }
+
+ bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ bo->refcount = 1;
+
+ anv_bo_init(&bo->bo, gem_handle, size);
+
+ if (device->instance->physicalDevice.supports_48bit_addresses)
+ bo->bo.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+ if (device->instance->physicalDevice.has_exec_async)
+ bo->bo.flags |= EXEC_OBJECT_ASYNC;
+
+ _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
}
- return bo;
+ pthread_mutex_unlock(&cache->mutex);
+
+ /* From the Vulkan spec:
+ *
+ * "Importing memory from a file descriptor transfers ownership of
+ * the file descriptor from the application to the Vulkan
+ * implementation. The application must not perform any operations on
+ * the file descriptor after a successful import."
+ *
+ * If the import fails, we leave the file descriptor open.
+ */
+ close(fd);
+
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
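+/* Exports the BO as a prime file descriptor. Ownership of the returned fd
+ * passes to the caller; the cache's reference to the BO is unaffected.
+ */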
+VkResult
+anv_bo_cache_export(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in, int *fd_out)
+{
+ assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+
+ int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
+ if (fd < 0)
+ return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+
+ *fd_out = fd;
+
+ return VK_SUCCESS;
+}
+
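+/* Atomically decrement *counter unless its value is 1. Returns true if the
+ * decrement happened, i.e. we did not hold the last reference.
+ */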
+static bool
+atomic_dec_not_one(uint32_t *counter)
+{
+ uint32_t old, val;
+
+ val = *counter;
+ while (1) {
+ if (val == 1)
+ return false;
+
+ old = __sync_val_compare_and_swap(counter, val, val - 1);
+ if (old == val)
+ return true;
+
+ val = old;
+ }
+}
+
+void
+anv_bo_cache_release(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in)
+{
+ assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+
+ /* Try to decrement the counter but don't go below one. If this succeeds
+ * then the refcount has been decremented and we are not the last
+ * reference.
+ */
+ if (atomic_dec_not_one(&bo->refcount))
+ return;
+
+ pthread_mutex_lock(&cache->mutex);
+
+ /* We are probably the last reference since our attempt to decrement above
+ * failed. However, we can't actually know until we are inside the mutex.
+ * Otherwise, someone could import the BO between the decrement and our
+ * taking the mutex.
+ */
+ if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
+ /* Turns out we're not the last reference. Unlock and bail. */
+ pthread_mutex_unlock(&cache->mutex);
+ return;
+ }
+
+ struct hash_entry *entry =
+ _mesa_hash_table_search(cache->bo_map,
+ (const void *)(uintptr_t)bo->bo.gem_handle);
+ assert(entry);
+ _mesa_hash_table_remove(cache->bo_map, entry);
+
+ if (bo->bo.map)
+ anv_gem_munmap(bo->bo.map, bo->bo.size);
+
+ anv_gem_close(device, bo->bo.gem_handle);
+
+ /* Don't unlock until we've actually closed the BO. The whole point of
+ * the BO cache is to ensure that we correctly handle races with creating
+ * and releasing GEM handles and we don't want to let someone import the BO
+ * again between mutex unlock and closing the GEM handle.
+ */
+ pthread_mutex_unlock(&cache->mutex);
+
+ vk_free(&device->alloc, bo);
}