#include "anv_private.h"
-#include "util/simple_mtx.h"
+#include "common/gen_aux_map.h"
#include "util/anon_file.h"
#ifdef HAVE_VALGRIND
map = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, table->fd, 0);
if (map == MAP_FAILED) {
- return vk_errorf(table->device->instance, table->device,
+ return vk_errorf(table->device, table->device,
VK_ERROR_OUT_OF_HOST_MEMORY, "mmap failed: %m");
}
VkResult result;
pool->device = device;
- pool->use_softpin = device->instance->physicalDevice.use_softpin;
+ pool->use_softpin = device->physical->use_softpin;
pool->nbos = 0;
pool->size = 0;
pool->center_bo_offset = 0;
if (pool->fd == -1)
return vk_error(VK_ERROR_INITIALIZATION_FAILED);
- anv_bo_init(&pool->wrapper_bo, 0, 0);
- pool->wrapper_bo.is_wrapper = true;
+ pool->wrapper_bo = (struct anv_bo) {
+ .refcount = 1,
+ .offset = -1,
+ .is_wrapper = true,
+ };
pool->bo = &pool->wrapper_bo;
}
{
anv_block_pool_foreach_bo(bo, pool) {
if (bo->map)
- anv_gem_munmap(bo->map, bo->size);
+ anv_gem_munmap(pool->device, bo->map, bo->size);
anv_gem_close(pool->device, bo->gem_handle);
}
* hard work for us. When using softpin, we're in control and the fixed
* addresses we choose are fine for base addresses.
*/
- enum anv_bo_alloc_flags bo_alloc_flags = 0;
+ enum anv_bo_alloc_flags bo_alloc_flags = ANV_BO_ALLOC_CAPTURE;
if (!pool->use_softpin)
bo_alloc_flags |= ANV_BO_ALLOC_32BIT_ADDRESS;
- uint64_t bo_flags = 0;
- if (pool->device->instance->physicalDevice.has_exec_capture)
- bo_flags |= EXEC_OBJECT_CAPTURE;
-
if (pool->use_softpin) {
uint32_t new_bo_size = size - pool->size;
struct anv_bo *new_bo;
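+ /* With softpin the pool only ever grows forward, so the center offset
+ * stays at zero (anv_block_pool_grow asserts the same thing below).
+ */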
+ assert(center_bo_offset == 0);
VkResult result = anv_device_alloc_bo(pool->device, new_bo_size,
bo_alloc_flags |
ANV_BO_ALLOC_FIXED_ADDRESS |
ANV_BO_ALLOC_MAPPED |
ANV_BO_ALLOC_SNOOPED,
+ pool->start_address + pool->size,
&new_bo);
if (result != VK_SUCCESS)
return result;
- assert(center_bo_offset == 0);
-
- new_bo->offset = pool->start_address + pool->size;
pool->bos[pool->nbos++] = new_bo;
/* This pointer will always point to the first BO in the list */
MAP_SHARED | MAP_POPULATE, pool->fd,
BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
if (map == MAP_FAILED)
- return vk_errorf(pool->device->instance, pool->device,
+ return vk_errorf(pool->device, pool->device,
VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
struct anv_bo *new_bo;
VkResult result = anv_device_import_bo_from_host_ptr(pool->device,
map, size,
bo_alloc_flags,
+ 0 /* client_address */,
&new_bo);
if (result != VK_SUCCESS) {
munmap(map, size);
* rather than the start of the block pool BO map.
*/
void*
-anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
+anv_block_pool_map(struct anv_block_pool *pool, int32_t offset, uint32_t size)
{
if (pool->use_softpin) {
struct anv_bo *bo = NULL;
}
assert(bo != NULL);
assert(offset >= bo_offset);
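+ /* The caller passes the size of the range it is going to touch so we
+ * can check that the whole range lives within this single BO's map.
+ */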
+ assert((offset - bo_offset) + size <= bo->size);
return bo->map + (offset - bo_offset);
} else {
* the pool and a 4K CPU page.
*/
static uint32_t
-anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state,
+ uint32_t contiguous_size)
{
VkResult result = VK_SUCCESS;
*/
assert(old_size > 0);
+ const uint32_t old_back = pool->center_bo_offset;
+ const uint32_t old_front = old_size - pool->center_bo_offset;
+
/* The back_used and front_used may be smaller than the actual
* requirement because they are based on the next pointers which are
* updated prior to calling this function.
*/
- uint32_t back_required = MAX2(back_used, pool->center_bo_offset);
- uint32_t front_required = MAX2(front_used, old_size - pool->center_bo_offset);
+ uint32_t back_required = MAX2(back_used, old_back);
+ uint32_t front_required = MAX2(front_used, old_front);
+
+ if (pool->use_softpin) {
+ /* With softpin, the pool is made up of a bunch of buffers with separate
+ * maps. Make sure we have enough contiguous space that we can get a
+ * properly contiguous map for the next chunk.
+ */
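+ /* For example, with 1 MiB currently in front of the center and a 64 KiB
+ * contiguous_size, the front requirement becomes at least 1 MiB + 64 KiB
+ * so the whole chunk can come from the BO we are about to add
+ * (illustrative numbers).
+ */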
+ assert(old_back == 0);
+ front_required = MAX2(front_required, old_front + contiguous_size);
+ }
if (back_used * 2 <= back_required && front_used * 2 <= front_required) {
/* If we're in this case then this isn't the first allocation and we
*/
new.next = state.next + block_size;
do {
- new.end = anv_block_pool_grow(pool, pool_state);
+ new.end = anv_block_pool_grow(pool, pool_state, block_size);
} while (new.end < new.next);
old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
VkResult
anv_state_pool_init(struct anv_state_pool *pool,
struct anv_device *device,
- uint64_t start_address,
+ uint64_t base_address,
+ int32_t start_offset,
uint32_t block_size)
{
+ /* We don't want to ever see signed overflow */
+ assert(start_offset < INT32_MAX - (int32_t)BLOCK_POOL_MEMFD_SIZE);
+
VkResult result = anv_block_pool_init(&pool->block_pool, device,
- start_address,
+ base_address + start_offset,
block_size * 16);
if (result != VK_SUCCESS)
return result;
+ pool->start_offset = start_offset;
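+ /* States handed out by this pool carry offsets relative to base_address:
+ * start_offset plus their offset within the underlying block pool (see
+ * the offset computations below).
+ */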
+
result = anv_state_table_init(&pool->table, device, 64);
if (result != VK_SUCCESS) {
anv_block_pool_finish(&pool->block_pool);
struct anv_state *state_i = anv_state_table_get(&pool->table,
st_idx + i);
state_i->alloc_size = block_size;
- state_i->offset = chunk_offset + block_size * i;
- state_i->map = anv_block_pool_map(&pool->block_pool, state_i->offset);
+ state_i->offset = pool->start_offset + chunk_offset + block_size * i;
+ state_i->map = anv_block_pool_map(&pool->block_pool,
+ state_i->offset,
+ state_i->alloc_size);
}
uint32_t block_bucket = anv_state_pool_get_bucket(block_size);
state = anv_free_list_pop(&pool->buckets[bucket].free_list,
&pool->table);
if (state) {
- assert(state->offset >= 0);
+ assert(state->offset >= pool->start_offset);
goto done;
}
assert(result == VK_SUCCESS);
state = anv_state_table_get(&pool->table, idx);
- state->offset = offset;
+ state->offset = pool->start_offset + offset;
state->alloc_size = alloc_size;
- state->map = anv_block_pool_map(&pool->block_pool, offset);
+ state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
if (padding > 0) {
uint32_t return_offset = offset - padding;
struct anv_state *state;
uint32_t alloc_size = pool->block_size;
+ /* This function is only used with pools where start_offset == 0 */
+ assert(pool->start_offset == 0);
+
state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
if (state) {
- assert(state->offset < 0);
+ assert(state->offset < pool->start_offset);
goto done;
}
assert(result == VK_SUCCESS);
state = anv_state_table_get(&pool->table, idx);
- state->offset = offset;
+ state->offset = pool->start_offset + offset;
state->alloc_size = alloc_size;
- state->map = anv_block_pool_map(&pool->block_pool, state->offset);
+ state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
done:
VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
assert(util_is_power_of_two_or_zero(state.alloc_size));
unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);
- if (state.offset < 0) {
+ if (state.offset < pool->start_offset) {
assert(state.alloc_size == pool->block_size);
anv_free_list_push(&pool->back_alloc_free_list,
&pool->table, state.idx, 1);
stream->block = ANV_STATE_NULL;
- stream->block_list = NULL;
-
/* Ensure that next + whatever > block_size. This way the first call to
* state_stream_alloc fetches a new block.
*/
stream->next = block_size;
+ util_dynarray_init(&stream->all_blocks, NULL);
+
VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
}
void
anv_state_stream_finish(struct anv_state_stream *stream)
{
- struct anv_state_stream_block *next = stream->block_list;
- while (next != NULL) {
- struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
- VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
- VG(VALGRIND_MAKE_MEM_UNDEFINED(next, stream->block_size));
- anv_state_pool_free_no_vg(stream->state_pool, sb.block);
- next = sb.next;
+ util_dynarray_foreach(&stream->all_blocks, struct anv_state, block) {
+ VG(VALGRIND_MEMPOOL_FREE(stream, block->map));
+ VG(VALGRIND_MAKE_MEM_NOACCESS(block->map, block->alloc_size));
+ anv_state_pool_free_no_vg(stream->state_pool, *block);
}
+ util_dynarray_fini(&stream->all_blocks);
VG(VALGRIND_DESTROY_MEMPOOL(stream));
}
stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
block_size, PAGE_SIZE);
+ util_dynarray_append(&stream->all_blocks,
+ struct anv_state, stream->block);
+ VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, block_size));
- struct anv_state_stream_block *sb = stream->block.map;
- VG_NOACCESS_WRITE(&sb->block, stream->block);
- VG_NOACCESS_WRITE(&sb->next, stream->block_list);
- stream->block_list = sb;
- VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, NULL));
-
- VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, stream->block_size));
-
- /* Reset back to the start plus space for the header */
- stream->next = sizeof(*sb);
-
- offset = align_u32(stream->next, alignment);
+ /* Reset back to the start */
+ stream->next = offset = 0;
assert(offset + size <= stream->block.alloc_size);
}
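+ /* stream->next is only zero immediately after grabbing a fresh block
+ * above, so this records whether the allocation is the first one out of
+ * the current block.
+ */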
+ const bool new_block = stream->next == 0;
struct anv_state state = stream->block;
state.offset += offset;
stream->next = offset + size;
-#ifdef HAVE_VALGRIND
- struct anv_state_stream_block *sb = stream->block_list;
- void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
- if (vg_ptr == NULL) {
- vg_ptr = state.map;
- VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
- VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
+ if (new_block) {
+ assert(state.map == stream->block.map);
+ VG(VALGRIND_MEMPOOL_ALLOC(stream, state.map, size));
} else {
- void *state_end = state.map + state.alloc_size;
/* This only updates the mempool. The newly allocated chunk is still
* marked as NOACCESS. */
- VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
+ VG(VALGRIND_MEMPOOL_CHANGE(stream, stream->block.map, stream->block.map,
+ stream->next));
/* Mark the newly allocated chunk as undefined */
- VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
+ VG(VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size));
}
-#endif
return state;
}
void
-anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
- uint64_t bo_flags)
+anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool,
+ struct anv_state_pool *parent,
+ uint32_t count, uint32_t size, uint32_t alignment)
+{
+ pool->pool = parent;
+ pool->reserved_blocks = ANV_FREE_LIST_EMPTY;
+ pool->count = count;
+
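+ /* Grab `count` states from the parent pool now and park them on our own
+ * free list; later anv_state_reserved_pool_alloc() calls simply pop from
+ * this list, so they cannot fail as long as the caller stays within
+ * `count`.
+ */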
+ for (unsigned i = 0; i < count; i++) {
+ struct anv_state state = anv_state_pool_alloc(pool->pool, size, alignment);
+ anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
+ }
+}
+
+void
+anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool)
+{
+ struct anv_state *state;
+
+ while ((state = anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table))) {
+ anv_state_pool_free(pool->pool, *state);
+ pool->count--;
+ }
+ assert(pool->count == 0);
+}
+
+struct anv_state
+anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool)
+{
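+ /* Popping an empty list would return NULL and crash on the dereference
+ * below, so callers are expected to stay within the reserved count.
+ */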
+ return *anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table);
+}
+
+void
+anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool,
+ struct anv_state state)
+{
+ anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
+}
+
+void
+anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device)
{
pool->device = device;
- pool->bo_flags = bo_flags;
for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
util_sparse_array_free_list_init(&pool->free_list[i],
&device->bo_cache.bo_map, 0,
VkResult result = anv_device_alloc_bo(pool->device,
pow2_size,
ANV_BO_ALLOC_MAPPED |
- ANV_BO_ALLOC_SNOOPED,
+ ANV_BO_ALLOC_SNOOPED |
+ ANV_BO_ALLOC_CAPTURE,
+ 0 /* explicit_address */,
&bo);
if (result != VK_SUCCESS)
return result;
if (bo != NULL)
return bo;
- const struct anv_physical_device *physical_device =
- &device->instance->physicalDevice;
- const struct gen_device_info *devinfo = &physical_device->info;
+ const struct gen_device_info *devinfo = &device->info;
- const unsigned subslices = MAX2(physical_device->subslice_total, 1);
+ unsigned subslices = MAX2(device->physical->subslice_total, 1);
+
+ /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
+ *
+ * "Scratch Space per slice is computed based on 4 sub-slices. SW
+ * must allocate scratch space enough so that each slice has 4
+ * slices allowed."
+ *
+ * According to the other driver team, this applies to compute shaders
+ * as well. This is not currently documented at all.
+ *
+ * This hack is no longer necessary on Gen11+.
+ *
+ * For Gen11+, scratch space allocation is based on the number of threads
+ * in the base configuration.
+ */
+ if (devinfo->gen >= 12)
+ subslices = devinfo->num_subslices[0];
+ else if (devinfo->gen == 11)
+ subslices = 8;
+ else if (devinfo->gen >= 9)
+ subslices = 4 * devinfo->num_slices;
unsigned scratch_ids_per_subslice;
- if (devinfo->gen >= 11) {
+ if (devinfo->gen >= 12) {
+ /* Same as ICL below, but with 16 EUs. */
+ scratch_ids_per_subslice = 16 * 8;
+ } else if (devinfo->gen == 11) {
/* The MEDIA_VFE_STATE docs say:
*
* "Starting with this configuration, the Maximum Number of
* so nothing will ever touch the top page.
*/
VkResult result = anv_device_alloc_bo(device, size,
- ANV_BO_ALLOC_32BIT_ADDRESS, &bo);
+ ANV_BO_ALLOC_32BIT_ADDRESS,
+ 0 /* explicit_address */,
+ &bo);
if (result != VK_SUCCESS)
return NULL; /* TODO */
anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
enum anv_bo_alloc_flags alloc_flags)
{
- struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+ struct anv_physical_device *pdevice = device->physical;
uint64_t bo_flags = 0;
if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS) &&
return bo_flags;
}
+static uint32_t
+anv_device_get_bo_align(struct anv_device *device,
+ enum anv_bo_alloc_flags alloc_flags)
+{
+ /* Gen12 CCS surface addresses need to be 64K aligned. */
+ if (device->info.gen >= 12 && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS))
+ return 64 * 1024;
+
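+ /* Everything else only needs the usual 4K page alignment. */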
+ return 4096;
+}
+
VkResult
anv_device_alloc_bo(struct anv_device *device,
uint64_t size,
enum anv_bo_alloc_flags alloc_flags,
+ uint64_t explicit_address,
struct anv_bo **bo_out)
{
+ if (!device->physical->has_implicit_ccs)
+ assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
+
const uint32_t bo_flags =
anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
/* The kernel is going to give us whole pages anyway */
size = align_u64(size, 4096);
- struct anv_bo new_bo;
- VkResult result = anv_bo_init_new(&new_bo, device, size);
- if (result != VK_SUCCESS)
- return result;
+ const uint32_t align = anv_device_get_bo_align(device, alloc_flags);
- new_bo.flags = bo_flags;
- new_bo.is_external = (alloc_flags & ANV_BO_ALLOC_EXTERNAL);
+ uint64_t ccs_size = 0;
+ if (device->info.has_aux_map && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS)) {
+ /* Align the size up to the next multiple of 64K so we don't have any
+ * AUX-TT entries pointing from a 64K page to itself.
+ */
+ size = align_u64(size, 64 * 1024);
+
+ /* See anv_bo::_ccs_size */
+ ccs_size = align_u64(DIV_ROUND_UP(size, GEN_AUX_MAP_GEN12_CCS_SCALE), 4096);
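+ /* In other words, one byte of CCS covers GEN_AUX_MAP_GEN12_CCS_SCALE
+ * bytes of the main surface, rounded up to a whole page (e.g. a 1 MiB BO
+ * would need a 4 KiB CCS region with a scale of 256; illustrative value).
+ */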
+ }
+
+ uint32_t gem_handle = anv_gem_create(device, size + ccs_size);
+ if (gem_handle == 0)
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
+ struct anv_bo new_bo = {
+ .gem_handle = gem_handle,
+ .refcount = 1,
+ .offset = -1,
+ .size = size,
+ ._ccs_size = ccs_size,
+ .flags = bo_flags,
+ .is_external = (alloc_flags & ANV_BO_ALLOC_EXTERNAL),
+ .has_client_visible_address =
+ (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
+ .has_implicit_ccs = ccs_size > 0,
+ };
if (alloc_flags & ANV_BO_ALLOC_MAPPED) {
new_bo.map = anv_gem_mmap(device, new_bo.gem_handle, 0, size, 0);
if (alloc_flags & ANV_BO_ALLOC_FIXED_ADDRESS) {
new_bo.has_fixed_address = true;
- } else {
- if (!anv_vma_alloc(device, &new_bo)) {
+ new_bo.offset = explicit_address;
+ } else if (new_bo.flags & EXEC_OBJECT_PINNED) {
+ new_bo.offset = anv_vma_alloc(device, new_bo.size + new_bo._ccs_size,
+ align, alloc_flags, explicit_address);
+ if (new_bo.offset == 0) {
if (new_bo.map)
- anv_gem_munmap(new_bo.map, size);
+ anv_gem_munmap(device, new_bo.map, size);
anv_gem_close(device, new_bo.gem_handle);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to allocate virtual address for BO");
}
+ } else {
+ assert(!new_bo.has_client_visible_address);
+ }
+
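+ /* The implicit CCS data lives in the extra _ccs_size bytes we allocated
+ * at the end of the BO, so point the AUX-TT entries for the main surface
+ * at that tail region.
+ */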
+ if (new_bo._ccs_size > 0) {
+ assert(device->info.has_aux_map);
+ gen_aux_map_add_mapping(device->aux_map_ctx,
+ gen_canonical_address(new_bo.offset),
+ gen_canonical_address(new_bo.offset + new_bo.size),
+ new_bo.size, 0 /* format_bits */);
}
assert(new_bo.gem_handle);
anv_device_import_bo_from_host_ptr(struct anv_device *device,
void *host_ptr, uint32_t size,
enum anv_bo_alloc_flags alloc_flags,
+ uint64_t client_address,
struct anv_bo **bo_out)
{
assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
ANV_BO_ALLOC_SNOOPED |
ANV_BO_ALLOC_FIXED_ADDRESS)));
+ /* We can't do implicit CCS with an aux table on shared memory */
+ if (!device->physical->has_implicit_ccs || device->info.has_aux_map)
+ assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
+
struct anv_bo_cache *cache = &device->bo_cache;
const uint32_t bo_flags =
anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
assert(bo->gem_handle == gem_handle);
if (bo_flags != bo->flags) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"same host pointer imported two different ways");
}
+
+ if (bo->has_client_visible_address !=
+ ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported with and without buffer "
+ "device address");
+ }
+
+ if (client_address && client_address != gen_48b_address(bo->offset)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported at two different "
+ "addresses");
+ }
+
__sync_fetch_and_add(&bo->refcount, 1);
} else {
- struct anv_bo new_bo;
- anv_bo_init(&new_bo, gem_handle, size);
- new_bo.map = host_ptr;
- new_bo.flags = bo_flags;
- new_bo.is_external = true;
- new_bo.from_host_ptr = true;
-
- if (!anv_vma_alloc(device, &new_bo)) {
- anv_gem_close(device, new_bo.gem_handle);
- pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "failed to allocate virtual address for BO");
+ struct anv_bo new_bo = {
+ .gem_handle = gem_handle,
+ .refcount = 1,
+ .offset = -1,
+ .size = size,
+ .map = host_ptr,
+ .flags = bo_flags,
+ .is_external = true,
+ .from_host_ptr = true,
+ .has_client_visible_address =
+ (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
+ };
+
+ assert(client_address == gen_48b_address(client_address));
+ if (new_bo.flags & EXEC_OBJECT_PINNED) {
+ assert(new_bo._ccs_size == 0);
+ new_bo.offset = anv_vma_alloc(device, new_bo.size,
+ anv_device_get_bo_align(device,
+ alloc_flags),
+ alloc_flags, client_address);
+ if (new_bo.offset == 0) {
+ anv_gem_close(device, new_bo.gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+ } else {
+ assert(!new_bo.has_client_visible_address);
}
*bo = new_bo;
anv_device_import_bo(struct anv_device *device,
int fd,
enum anv_bo_alloc_flags alloc_flags,
+ uint64_t client_address,
struct anv_bo **bo_out)
{
assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
ANV_BO_ALLOC_SNOOPED |
ANV_BO_ALLOC_FIXED_ADDRESS)));
+ /* We can't do implicit CCS with an aux table on shared memory */
+ if (!device->physical->has_implicit_ccs || device->info.has_aux_map)
+ assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
+
struct anv_bo_cache *cache = &device->bo_cache;
const uint32_t bo_flags =
anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
if ((bo->flags & EXEC_OBJECT_PINNED) !=
(bo_flags & EXEC_OBJECT_PINNED)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported two different ways");
}
(bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
(bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported on two different heaps");
}
+ if (bo->has_client_visible_address !=
+ ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported with and without buffer "
+ "device address");
+ }
+
+ if (client_address && client_address != gen_48b_address(bo->offset)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported at two different "
+ "addresses");
+ }
+
bo->flags = new_flags;
__sync_fetch_and_add(&bo->refcount, 1);
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
- struct anv_bo new_bo;
- anv_bo_init(&new_bo, gem_handle, size);
- new_bo.flags = bo_flags;
- new_bo.is_external = true;
-
- if (!anv_vma_alloc(device, &new_bo)) {
- anv_gem_close(device, new_bo.gem_handle);
- pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "failed to allocate virtual address for BO");
+ struct anv_bo new_bo = {
+ .gem_handle = gem_handle,
+ .refcount = 1,
+ .offset = -1,
+ .size = size,
+ .flags = bo_flags,
+ .is_external = true,
+ .has_client_visible_address =
+ (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
+ };
+
+ assert(client_address == gen_48b_address(client_address));
+ if (new_bo.flags & EXEC_OBJECT_PINNED) {
+ assert(new_bo._ccs_size == 0);
+ new_bo.offset = anv_vma_alloc(device, new_bo.size,
+ anv_device_get_bo_align(device,
+ alloc_flags),
+ alloc_flags, client_address);
+ if (new_bo.offset == 0) {
+ anv_gem_close(device, new_bo.gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+ } else {
+ assert(!new_bo.has_client_visible_address);
}
*bo = new_bo;
assert(bo->refcount == 0);
if (bo->map && !bo->from_host_ptr)
- anv_gem_munmap(bo->map, bo->size);
+ anv_gem_munmap(device, bo->map, bo->size);
+
+ if (bo->_ccs_size > 0) {
+ assert(device->physical->has_implicit_ccs);
+ assert(device->info.has_aux_map);
+ assert(bo->has_implicit_ccs);
+ gen_aux_map_unmap_range(device->aux_map_ctx,
+ gen_canonical_address(bo->offset),
+ bo->size);
+ }
- if (!bo->has_fixed_address)
- anv_vma_free(device, bo);
+ if ((bo->flags & EXEC_OBJECT_PINNED) && !bo->has_fixed_address)
+ anv_vma_free(device, bo->offset, bo->size + bo->_ccs_size);
+
+ uint32_t gem_handle = bo->gem_handle;
+
+ /* Memset the BO just in case. The refcount being zero should be enough to
+ * prevent anyone from assuming the data is valid, but it's safer to stomp
+ * it to zero anyway. We explicitly do this *before* we close the GEM
+ * handle to ensure that if anyone allocates something and gets the same
+ * GEM handle, the memset has already happened and won't stomp all over
+ * any data they may write in this BO.
+ */
+ memset(bo, 0, sizeof(*bo));
- anv_gem_close(device, bo->gem_handle);
+ anv_gem_close(device, gem_handle);
/* Don't unlock until we've actually closed the BO. The whole point of
* the BO cache is to ensure that we correctly handle races with creating