#include <sys/types.h>
#include <stdbool.h>
#include <time.h>
+#include <unistd.h>
#include "errno.h"
-#ifndef ETIME
-#define ETIME ETIMEDOUT
-#endif
+#include "common/gen_aux_map.h"
#include "common/gen_clflush.h"
#include "dev/gen_debug.h"
#include "common/gen_gem.h"
#include "dev/gen_device_info.h"
#include "main/macros.h"
+#include "os/os_mman.h"
#include "util/debug.h"
#include "util/macros.h"
#include "util/hash_table.h"
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
-/**
- * Call ioctl, restarting if it is interupted
- */
-int
-drm_ioctl(int fd, unsigned long request, void *arg)
-{
- int ret;
-
- do {
- ret = ioctl(fd, request, arg);
- } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
- return ret;
-}
-
static inline int
atomic_add_unless(int *v, int add, int unless)
{
return names[memzone];
}
-/**
- * Iris fixed-size bucketing VMA allocator.
- *
- * The BO cache maintains "cache buckets" for buffers of various sizes.
- * All buffers in a given bucket are identically sized - when allocating,
- * we always round up to the bucket size. This means that virtually all
- * allocations are fixed-size; only buffers which are too large to fit in
- * a bucket can be variably-sized.
- *
- * We create an allocator for each bucket. Each contains a free-list, where
- * each node contains a <starting address, 64-bit bitmap> pair. Each bit
- * represents a bucket-sized block of memory. (At the first level, each
- * bit corresponds to a page. For the second bucket, bits correspond to
- * two pages, and so on.) 1 means a block is free, and 0 means it's in-use.
- * The lowest bit in the bitmap is for the first block.
- *
- * This makes allocations cheap - any bit of any node will do. We can pick
- * the head of the list and use ffs() to find a free block. If there are
- * none, we allocate 64 blocks from a larger allocator - either a bigger
- * bucketing allocator, or a fallback top-level allocator for large objects.
- */
-struct vma_bucket_node {
- uint64_t start_address;
- uint64_t bitmap;
-};
-
struct bo_cache_bucket {
/** List of cached BOs. */
struct list_head head;
/** Size of this bucket, in bytes. */
uint64_t size;
-
- /** List of vma_bucket_nodes. */
- struct util_dynarray vma_list[IRIS_MEMZONE_COUNT];
};
struct iris_bufmgr {
+ /**
+ * Link in the global list of buffer managers (global_bufmgr_list).
+ */
+ struct list_head link;
+
+ uint32_t refcount;
+
int fd;
mtx_t lock;
struct hash_table *name_table;
struct hash_table *handle_table;
+ /**
+ * List of BOs which we've effectively freed, but are hanging on to
+ * until they're idle, at which point we can close them and return
+ * their VMA.
+ */
+ struct list_head zombie_list;
+
struct util_vma_heap vma_allocator[IRIS_MEMZONE_COUNT];
bool has_llc:1;
bool bo_reuse:1;
+
+ struct gen_aux_map_context *aux_map_ctx;
+};
+
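+/* All buffer managers in this process, so that iris_bufmgr_get_for_fd() can
+ * hand out a single shared bufmgr to everyone using the same DRM device.
+ * Protected by global_bufmgr_list_mutex.
+ */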
+static mtx_t global_bufmgr_list_mutex = _MTX_INITIALIZER_NP;
+static struct list_head global_bufmgr_list = {
+ .next = &global_bufmgr_list,
+ .prev = &global_bufmgr_list,
};
static int bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
enum iris_memory_zone memzone,
uint64_t size, uint64_t alignment);
-static uint32_t
-key_hash_uint(const void *key)
-{
- return _mesa_hash_data(key, 4);
-}
-
-static bool
-key_uint_equal(const void *a, const void *b)
-{
- return *((unsigned *) a) == *((unsigned *) b);
-}
-
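+/**
+ * Look up a BO in one of the "external BO" hash tables (name_table or
+ * handle_table).  If found, take a new reference on it, and pull it off
+ * the zombie list if it was waiting there to be closed.
+ */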
static struct iris_bo *
-hash_find_bo(struct hash_table *ht, unsigned int key)
+find_and_ref_external_bo(struct hash_table *ht, unsigned int key)
{
struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
- return entry ? (struct iris_bo *) entry->data : NULL;
+ struct iris_bo *bo = entry ? entry->data : NULL;
+
+ if (bo) {
+ assert(bo->external);
+ assert(!bo->reusable);
+
+ /* Being non-reusable, the BO cannot be in the cache lists.  It may,
+ * however, be in the zombie list if it had dropped to zero references
+ * but we hadn't yet closed it...and the same BO was then reimported.
+ * If so, remove it, since it has now been resurrected.
+ */
+ if (bo->head.prev || bo->head.next)
+ list_del(&bo->head);
+
+ iris_bo_reference(bo);
+ }
+
+ return bo;
}
/**
&bufmgr->cache_bucket[index] : NULL;
}
-static enum iris_memory_zone
-memzone_for_address(uint64_t address)
+enum iris_memory_zone
+iris_memzone_for_address(uint64_t address)
{
STATIC_ASSERT(IRIS_MEMZONE_OTHER_START > IRIS_MEMZONE_DYNAMIC_START);
STATIC_ASSERT(IRIS_MEMZONE_DYNAMIC_START > IRIS_MEMZONE_SURFACE_START);
return IRIS_MEMZONE_SHADER;
}
-static uint64_t
-bucket_vma_alloc(struct iris_bufmgr *bufmgr,
- struct bo_cache_bucket *bucket,
- enum iris_memory_zone memzone)
-{
- struct util_dynarray *vma_list = &bucket->vma_list[memzone];
- struct vma_bucket_node *node;
-
- if (vma_list->size == 0) {
- /* This bucket allocator is out of space - allocate a new block of
- * memory for 64 blocks from a larger allocator (either a larger
- * bucket or util_vma).
- *
- * We align the address to the node size (64 blocks) so that
- * bucket_vma_free can easily compute the starting address of this
- * block by rounding any address we return down to the node size.
- *
- * Set the first bit used, and return the start address.
- */
- const uint64_t node_size = 64ull * bucket->size;
- node = util_dynarray_grow(vma_list, sizeof(struct vma_bucket_node));
-
- if (unlikely(!node))
- return 0ull;
-
- uint64_t addr = vma_alloc(bufmgr, memzone, node_size, node_size);
- node->start_address = gen_48b_address(addr);
- node->bitmap = ~1ull;
- return node->start_address;
- }
-
- /* Pick any bit from any node - they're all the right size and free. */
- node = util_dynarray_top_ptr(vma_list, struct vma_bucket_node);
- int bit = ffsll(node->bitmap) - 1;
- assert(bit >= 0 && bit <= 63);
-
- /* Reserve the memory by clearing the bit. */
- assert((node->bitmap & (1ull << bit)) != 0ull);
- node->bitmap &= ~(1ull << bit);
-
- uint64_t addr = node->start_address + bit * bucket->size;
-
- /* If this node is now completely full, remove it from the free list. */
- if (node->bitmap == 0ull) {
- (void) util_dynarray_pop(vma_list, struct vma_bucket_node);
- }
-
- return addr;
-}
-
-static void
-bucket_vma_free(struct bo_cache_bucket *bucket, uint64_t address)
-{
- enum iris_memory_zone memzone = memzone_for_address(address);
- struct util_dynarray *vma_list = &bucket->vma_list[memzone];
- const uint64_t node_bytes = 64ull * bucket->size;
- struct vma_bucket_node *node = NULL;
-
- /* bucket_vma_alloc allocates 64 blocks at a time, and aligns it to
- * that 64 block size. So, we can round down to get the starting address.
- */
- uint64_t start = (address / node_bytes) * node_bytes;
-
- /* Dividing the offset from start by bucket size gives us the bit index. */
- int bit = (address - start) / bucket->size;
-
- assert(start + bit * bucket->size == address);
-
- util_dynarray_foreach(vma_list, struct vma_bucket_node, cur) {
- if (cur->start_address == start) {
- node = cur;
- break;
- }
- }
-
- if (!node) {
- /* No node - the whole group of 64 blocks must have been in-use. */
- node = util_dynarray_grow(vma_list, sizeof(struct vma_bucket_node));
-
- if (unlikely(!node))
- return; /* bogus, leaks some GPU VMA, but nothing we can do... */
-
- node->start_address = start;
- node->bitmap = 0ull;
- }
-
- /* Set the bit to return the memory. */
- assert((node->bitmap & (1ull << bit)) == 0ull);
- node->bitmap |= 1ull << bit;
-
- /* The block might be entirely free now, and if so, we could return it
- * to the larger allocator. But we may as well hang on to it, in case
- * we get more allocations at this block size.
- */
-}
-
-static struct bo_cache_bucket *
-get_bucket_allocator(struct iris_bufmgr *bufmgr,
- enum iris_memory_zone memzone,
- uint64_t size)
-{
- /* Skip using the bucket allocator for very large sizes, as it allocates
- * 64 of them and this can balloon rather quickly.
- */
- if (size > 1024 * PAGE_SIZE)
- return NULL;
-
- struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);
-
- if (bucket && bucket->size == size)
- return bucket;
-
- return NULL;
-}
-
/**
* Allocate a section of virtual memory for a buffer, assigning an address.
*
uint64_t size,
uint64_t alignment)
{
+ /* Force alignment to be some number of pages */
+ alignment = ALIGN(alignment, PAGE_SIZE);
+
if (memzone == IRIS_MEMZONE_BORDER_COLOR_POOL)
return IRIS_BORDER_COLOR_POOL_ADDRESS;
if (memzone == IRIS_MEMZONE_BINDER)
return IRIS_MEMZONE_BINDER_START;
- struct bo_cache_bucket *bucket =
- get_bucket_allocator(bufmgr, memzone, size);
- uint64_t addr;
-
- if (bucket) {
- addr = bucket_vma_alloc(bufmgr, bucket, memzone);
- } else {
- addr = util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size,
- alignment);
- }
+ uint64_t addr =
+ util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size, alignment);
assert((addr >> 48ull) == 0);
assert((addr % alignment) == 0);
if (address == 0ull)
return;
- enum iris_memory_zone memzone = memzone_for_address(address);
+ enum iris_memory_zone memzone = iris_memzone_for_address(address);
/* The binder handles its own allocations. */
if (memzone == IRIS_MEMZONE_BINDER)
return;
- struct bo_cache_bucket *bucket =
- get_bucket_allocator(bufmgr, memzone, size);
-
- if (bucket) {
- bucket_vma_free(bucket, address);
- } else {
- util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
- }
+ util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
}
int
struct iris_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
if (ret == 0) {
bo->idle = !busy.busy;
return busy.busy;
.retained = 1,
};
- drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+ gen_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
return madv.retained;
}
-/* drop the oldest entries that have been purged by the kernel */
-static void
-iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
- struct bo_cache_bucket *bucket)
-{
- list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
- if (iris_bo_madvise(bo, I915_MADV_DONTNEED))
- break;
-
- list_del(&bo->head);
- bo_free(bo);
- }
-}
-
static struct iris_bo *
bo_calloc(void)
{
}
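+/**
+ * Try to reuse an idle BO from the bucket's cache.  When match_zone is set,
+ * only BOs whose address already lies in the requested memory zone are
+ * considered, so their existing VMA can be kept.  Returns NULL if no
+ * suitable cached BO is available.
+ */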
static struct iris_bo *
-bo_alloc_internal(struct iris_bufmgr *bufmgr,
- const char *name,
- uint64_t size,
- enum iris_memory_zone memzone,
- unsigned flags,
- uint32_t tiling_mode,
- uint32_t stride)
+alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
+ struct bo_cache_bucket *bucket,
+ uint32_t alignment,
+ enum iris_memory_zone memzone,
+ unsigned flags,
+ bool match_zone)
{
- struct iris_bo *bo;
- unsigned int page_size = getpagesize();
- int ret;
- struct bo_cache_bucket *bucket;
- bool alloc_from_cache;
- uint64_t bo_size;
- bool zeroed = false;
+ if (!bucket)
+ return NULL;
- if (flags & BO_ALLOC_ZEROED)
- zeroed = true;
+ struct iris_bo *bo = NULL;
- if ((flags & BO_ALLOC_COHERENT) && !bufmgr->has_llc) {
- bo_size = MAX2(ALIGN(size, page_size), page_size);
- bucket = NULL;
- goto skip_cache;
+ list_for_each_entry_safe(struct iris_bo, cur, &bucket->head, head) {
+ /* Try a little harder to find one that's already in the right memzone */
+ if (match_zone && memzone != iris_memzone_for_address(cur->gtt_offset))
+ continue;
+
+ /* If the last BO in the cache is busy, there are no idle BOs. Bail,
+ * either falling back to a non-matching memzone, or if that fails,
+ * allocating a fresh buffer.
+ */
+ if (iris_bo_busy(cur))
+ return NULL;
+
+ list_del(&cur->head);
+
+ /* Tell the kernel we need this BO. If it still exists, we're done! */
+ if (iris_bo_madvise(cur, I915_MADV_WILLNEED)) {
+ bo = cur;
+ break;
+ }
+
+ /* This BO was purged, throw it out and keep looking. */
+ bo_free(cur);
}
- /* Round the allocated size up to a power of two number of pages. */
- bucket = bucket_for_size(bufmgr, size);
+ if (!bo)
+ return NULL;
- /* If we don't have caching at this size, don't actually round the
- * allocation up.
+ if (bo->aux_map_address) {
+ /* This buffer was associated with an aux-buffer range.  We only reuse
+ * a buffer from the cache once it is idle, i.e. no longer in use by an
+ * executing batch.  Since we are here, the buffer is no longer in use
+ * by a batch and was deleted (in order to end up in the cache), so its
+ * old aux-buffer range can be removed from the aux-map.
+ */
+ if (bo->bufmgr->aux_map_ctx)
+ gen_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
+ bo->size);
+ bo->aux_map_address = 0;
+ }
+
+ /* If the cached BO isn't in the right memory zone, or the alignment
+ * isn't sufficient, free the old memory and assign it a new address.
*/
- if (bucket == NULL) {
- bo_size = MAX2(ALIGN(size, page_size), page_size);
- } else {
- bo_size = bucket->size;
+ if (memzone != iris_memzone_for_address(bo->gtt_offset) ||
+ bo->gtt_offset % alignment != 0) {
+ vma_free(bufmgr, bo->gtt_offset, bo->size);
+ bo->gtt_offset = 0ull;
}
- mtx_lock(&bufmgr->lock);
- /* Get a buffer out of the cache if available */
-retry:
- alloc_from_cache = false;
- if (bucket != NULL && !list_empty(&bucket->head)) {
- /* If the last BO in the cache is idle, then reuse it. Otherwise,
- * allocate a fresh buffer to avoid stalling.
- */
- bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
- if (!iris_bo_busy(bo)) {
- alloc_from_cache = true;
- list_del(&bo->head);
+ /* Zero the contents if necessary. If this fails, fall back to
+ * allocating a fresh BO, which will always be zeroed by the kernel.
+ */
+ if (flags & BO_ALLOC_ZEROED) {
+ void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
+ if (map) {
+ memset(map, 0, bo->size);
+ } else {
+ bo_free(bo);
+ return NULL;
}
+ }
- if (alloc_from_cache) {
- if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
- bo_free(bo);
- iris_bo_cache_purge_bucket(bufmgr, bucket);
- goto retry;
- }
-
- if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
- bo_free(bo);
- goto retry;
- }
-
- if (zeroed) {
- void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
- if (!map) {
- bo_free(bo);
- goto retry;
- }
- memset(map, 0, bo_size);
- }
- }
+ return bo;
+}
+
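+/**
+ * Allocate a brand new (zeroed) BO of the given size from the kernel via
+ * DRM_IOCTL_I915_GEM_CREATE, asking for its backing pages up front with a
+ * CPU set-domain call.
+ */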
+static struct iris_bo *
+alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size)
+{
+ struct iris_bo *bo = bo_calloc();
+ if (!bo)
+ return NULL;
+
+ struct drm_i915_gem_create create = { .size = bo_size };
+
+ /* All new BOs we get from the kernel are zeroed, so we don't need to
+ * worry about that here.
+ */
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0) {
+ free(bo);
+ return NULL;
}
- if (alloc_from_cache) {
- /* If the cached BO isn't in the right memory zone, free the old
- * memory and assign it a new address.
- */
- if (memzone != memzone_for_address(bo->gtt_offset)) {
- vma_free(bufmgr, bo->gtt_offset, bo->size);
- bo->gtt_offset = 0ull;
- }
- } else {
-skip_cache:
- bo = bo_calloc();
- if (!bo)
- goto err;
+ bo->gem_handle = create.handle;
+ bo->bufmgr = bufmgr;
+ bo->size = bo_size;
+ bo->idle = true;
+ bo->tiling_mode = I915_TILING_NONE;
+ bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ bo->stride = 0;
- bo->size = bo_size;
- bo->idle = true;
+ /* Calling set_domain() will allocate pages for the BO outside of the
+ * struct mutex lock in the kernel, which is more efficient than waiting
+ * to create them during the first execbuf that uses the BO.
+ */
+ struct drm_i915_gem_set_domain sd = {
+ .handle = bo->gem_handle,
+ .read_domains = I915_GEM_DOMAIN_CPU,
+ .write_domain = 0,
+ };
- struct drm_i915_gem_create create = { .size = bo_size };
+ if (gen_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
+ bo_free(bo);
+ return NULL;
+ }
- /* All new BOs we get from the kernel are zeroed, so we don't need to
- * worry about that here.
- */
- ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
- if (ret != 0) {
- free(bo);
- goto err;
- }
+ return bo;
+}
- bo->gem_handle = create.handle;
+static struct iris_bo *
+bo_alloc_internal(struct iris_bufmgr *bufmgr,
+ const char *name,
+ uint64_t size,
+ uint32_t alignment,
+ enum iris_memory_zone memzone,
+ unsigned flags,
+ uint32_t tiling_mode,
+ uint32_t stride)
+{
+ struct iris_bo *bo;
+ unsigned int page_size = getpagesize();
+ struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);
- bo->bufmgr = bufmgr;
+ /* Round the size up to the bucket size, or if we don't have caching
+ * at this size, to a multiple of the page size.
+ */
+ uint64_t bo_size =
+ bucket ? bucket->size : MAX2(ALIGN(size, page_size), page_size);
- bo->tiling_mode = I915_TILING_NONE;
- bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- bo->stride = 0;
+ mtx_lock(&bufmgr->lock);
- if (bo_set_tiling_internal(bo, tiling_mode, stride))
- goto err_free;
+ /* Get a buffer out of the cache if available. First, we try to find
+ * one with a matching memory zone so we can avoid reallocating VMA.
+ */
+ bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, flags, true);
- /* Calling set_domain() will allocate pages for the BO outside of the
- * struct mutex lock in the kernel, which is more efficient than waiting
- * to create them during the first execbuf that uses the BO.
- */
- struct drm_i915_gem_set_domain sd = {
- .handle = bo->gem_handle,
- .read_domains = I915_GEM_DOMAIN_CPU,
- .write_domain = 0,
- };
+ /* If that fails, we try for any cached BO, without matching memzone. */
+ if (!bo) {
+ bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, flags,
+ false);
+ }
+
+ mtx_unlock(&bufmgr->lock);
+
+ if (!bo) {
+ bo = alloc_fresh_bo(bufmgr, bo_size);
+ if (!bo)
+ return NULL;
+ }
+
+ if (bo->gtt_offset == 0ull) {
+ mtx_lock(&bufmgr->lock);
+ bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, alignment);
+ mtx_unlock(&bufmgr->lock);
- if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
+ if (bo->gtt_offset == 0ull)
goto err_free;
}
+ if (bo_set_tiling_internal(bo, tiling_mode, stride))
+ goto err_free;
+
bo->name = name;
p_atomic_set(&bo->refcount, 1);
bo->reusable = bucket && bufmgr->bo_reuse;
if (memzone < IRIS_MEMZONE_OTHER)
bo->kflags |= EXEC_OBJECT_CAPTURE;
- if (bo->gtt_offset == 0ull) {
- bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);
-
- if (bo->gtt_offset == 0ull)
- goto err_free;
- }
-
- mtx_unlock(&bufmgr->lock);
-
if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
struct drm_i915_gem_caching arg = {
.handle = bo->gem_handle,
.caching = 1,
};
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
bo->cache_coherent = true;
bo->reusable = false;
}
err_free:
bo_free(bo);
-err:
- mtx_unlock(&bufmgr->lock);
return NULL;
}
uint64_t size,
enum iris_memory_zone memzone)
{
- return bo_alloc_internal(bufmgr, name, size, memzone,
+ return bo_alloc_internal(bufmgr, name, size, 1, memzone,
0, I915_TILING_NONE, 0);
}
struct iris_bo *
iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
- uint64_t size, enum iris_memory_zone memzone,
+ uint64_t size, uint32_t alignment,
+ enum iris_memory_zone memzone,
uint32_t tiling_mode, uint32_t pitch, unsigned flags)
{
- return bo_alloc_internal(bufmgr, name, size, memzone,
+ return bo_alloc_internal(bufmgr, name, size, alignment, memzone,
flags, tiling_mode, pitch);
}
.user_ptr = (uintptr_t)ptr,
.user_size = size,
};
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
goto err_free;
bo->gem_handle = arg.handle;
.handle = bo->gem_handle,
.read_domains = I915_GEM_DOMAIN_CPU,
};
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
goto err_close;
bo->name = name;
bo->bufmgr = bufmgr;
bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
+
+ mtx_lock(&bufmgr->lock);
bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1);
+ mtx_unlock(&bufmgr->lock);
+
if (bo->gtt_offset == 0ull)
goto err_close;
return bo;
err_close:
- drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &bo->gem_handle);
+ gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &bo->gem_handle);
err_free:
free(bo);
return NULL;
* provides a sufficiently fast match.
*/
mtx_lock(&bufmgr->lock);
- bo = hash_find_bo(bufmgr->name_table, handle);
- if (bo) {
- iris_bo_reference(bo);
+ bo = find_and_ref_external_bo(bufmgr->name_table, handle);
+ if (bo)
goto out;
- }
struct drm_gem_open open_arg = { .name = handle };
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
if (ret != 0) {
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
* object from the kernel before by looking through the list
* again for a matching gem_handle
*/
- bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
- if (bo) {
- iris_bo_reference(bo);
+ bo = find_and_ref_external_bo(bufmgr->handle_table, open_arg.handle);
+ if (bo)
goto out;
- }
bo = bo_calloc();
if (!bo)
_mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
- ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
+ ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
if (ret != 0)
goto err_unref;
}
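+/**
+ * Actually release a BO: close its GEM handle, drop it from the export
+ * hash tables, remove any aux-map range it owned, and return its VMA for
+ * reuse.  Only called once the BO is idle (or at bufmgr teardown).
+ */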
static void
-bo_free(struct iris_bo *bo)
+bo_close(struct iris_bo *bo)
{
struct iris_bufmgr *bufmgr = bo->bufmgr;
- if (bo->map_cpu && !bo->userptr) {
- VG_NOACCESS(bo->map_cpu, bo->size);
- munmap(bo->map_cpu, bo->size);
- }
- if (bo->map_wc) {
- VG_NOACCESS(bo->map_wc, bo->size);
- munmap(bo->map_wc, bo->size);
- }
- if (bo->map_gtt) {
- VG_NOACCESS(bo->map_gtt, bo->size);
- munmap(bo->map_gtt, bo->size);
- }
-
if (bo->external) {
struct hash_entry *entry;
/* Close this object */
struct drm_gem_close close = { .handle = bo->gem_handle };
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
if (ret != 0) {
DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
bo->gem_handle, bo->name, strerror(errno));
}
+ if (bo->aux_map_address && bo->bufmgr->aux_map_ctx) {
+ gen_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
+ bo->size);
+ }
+
+ /* Return the VMA for reuse */
vma_free(bo->bufmgr, bo->gtt_offset, bo->size);
free(bo);
}
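+/**
+ * Unmap any CPU mappings of the BO, then either close it right away if
+ * it's idle, or park it on the zombie list so it can be closed once the
+ * GPU is done with it.
+ */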
+static void
+bo_free(struct iris_bo *bo)
+{
+ struct iris_bufmgr *bufmgr = bo->bufmgr;
+
+ if (bo->map_cpu && !bo->userptr) {
+ VG_NOACCESS(bo->map_cpu, bo->size);
+ os_munmap(bo->map_cpu, bo->size);
+ }
+ if (bo->map_wc) {
+ VG_NOACCESS(bo->map_wc, bo->size);
+ os_munmap(bo->map_wc, bo->size);
+ }
+ if (bo->map_gtt) {
+ VG_NOACCESS(bo->map_gtt, bo->size);
+ os_munmap(bo->map_gtt, bo->size);
+ }
+
+ if (bo->idle) {
+ bo_close(bo);
+ } else {
+ /* Defer closing the GEM BO and returning the VMA for reuse until the
+ * BO is idle. Just move it to the dead list for now.
+ */
+ list_addtail(&bo->head, &bufmgr->zombie_list);
+ }
+}
+
/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
}
}
+ list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
+ /* Stop once we reach a busy BO - all others past this point were
+ * freed more recently so are likely also busy.
+ */
+ if (!bo->idle && iris_bo_busy(bo))
+ break;
+
+ list_del(&bo->head);
+ bo_close(bo);
+ }
+
bufmgr->time = time;
}
.handle = bo->gem_handle,
.size = bo->size,
};
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
VG_NOACCESS(map, bo->size);
- munmap(map, bo->size);
+ os_munmap(map, bo->size);
}
}
assert(bo->map_cpu);
.size = bo->size,
.flags = I915_MMAP_WC,
};
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
VG_NOACCESS(map, bo->size);
- munmap(map, bo->size);
+ os_munmap(map, bo->size);
}
}
assert(bo->map_wc);
struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };
/* Get the fake offset back... */
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
if (ret != 0) {
DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
}
/* and mmap it. */
- void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
- MAP_SHARED, bufmgr->fd, mmap_arg.offset);
+ void *map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bufmgr->fd, mmap_arg.offset);
if (map == MAP_FAILED) {
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
VG_NOACCESS(map, bo->size);
- munmap(map, bo->size);
+ os_munmap(map, bo->size);
}
}
assert(bo->map_gtt);
.bo_handle = bo->gem_handle,
.timeout_ns = timeout_ns,
};
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
if (ret != 0)
return -errno;
return ret;
}
-void
+static void
iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
{
+ /* Free aux-map buffers */
+ gen_aux_map_finish(bufmgr->aux_map_ctx);
+
+ /* bufmgr will no longer try to free VMA entries in the aux-map */
+ bufmgr->aux_map_ctx = NULL;
+
mtx_destroy(&bufmgr->lock);
/* Free any cached buffer objects we were going to reuse */
bo_free(bo);
}
+ }
- for (int z = 0; z < IRIS_MEMZONE_COUNT; z++)
- util_dynarray_fini(&bucket->vma_list[z]);
+ /* Close any buffer objects on the dead list. */
+ list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
+ list_del(&bo->head);
+ bo_close(bo);
}
_mesa_hash_table_destroy(bufmgr->name_table, NULL);
util_vma_heap_finish(&bufmgr->vma_allocator[z]);
}
+ close(bufmgr->fd);
+
free(bufmgr);
}
}
struct iris_bo *
-iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
+iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd,
+ uint32_t tiling, uint32_t stride)
{
uint32_t handle;
struct iris_bo *bo;
* for named buffers, we must not create two bo's pointing at the same
* kernel object
*/
- bo = hash_find_bo(bufmgr->handle_table, handle);
- if (bo) {
- iris_bo_reference(bo);
+ bo = find_and_ref_external_bo(bufmgr->handle_table, handle);
+ if (bo)
goto out;
- }
bo = bo_calloc();
if (!bo)
bo->size = ret;
bo->bufmgr = bufmgr;
-
- bo->gem_handle = handle;
- _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
-
bo->name = "prime";
bo->reusable = false;
bo->external = true;
bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
+ bo->gem_handle = handle;
+ _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
goto err;
- bo->tiling_mode = get_tiling.tiling_mode;
- bo->swizzle_mode = get_tiling.swizzle_mode;
- /* XXX stride is unknown */
+ if (get_tiling.tiling_mode == tiling || tiling > I915_TILING_LAST) {
+ bo->tiling_mode = get_tiling.tiling_mode;
+ bo->swizzle_mode = get_tiling.swizzle_mode;
+ /* XXX stride is unknown */
+ } else {
+ if (bo_set_tiling_internal(bo, tiling, stride)) {
+ goto err;
+ }
+ }
out:
mtx_unlock(&bufmgr->lock);
if (!bo->external) {
_mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo);
bo->external = true;
+ bo->reusable = false;
}
}
{
struct iris_bufmgr *bufmgr = bo->bufmgr;
- if (bo->external)
+ if (bo->external) {
+ assert(!bo->reusable);
return;
+ }
mtx_lock(&bufmgr->lock);
iris_bo_make_external_locked(bo);
DRM_CLOEXEC, prime_fd) != 0)
return -errno;
- bo->reusable = false;
-
return 0;
}
if (!bo->global_name) {
struct drm_gem_flink flink = { .handle = bo->gem_handle };
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
return -errno;
mtx_lock(&bufmgr->lock);
_mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
}
mtx_unlock(&bufmgr->lock);
-
- bo->reusable = false;
}
*name = bo->global_name;
assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
list_inithead(&bufmgr->cache_bucket[i].head);
- for (int z = 0; z < IRIS_MEMZONE_COUNT; z++)
- util_dynarray_init(&bufmgr->cache_bucket[i].vma_list[z], NULL);
bufmgr->cache_bucket[i].size = size;
bufmgr->num_buckets++;
iris_create_hw_context(struct iris_bufmgr *bufmgr)
{
struct drm_i915_gem_context_create create = { };
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret != 0) {
DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
return 0;
}
+ /* Upon declaring a GPU hang, the kernel will zap the guilty context
+ * back to the default logical HW state and attempt to continue on to
+ * our next submitted batchbuffer. However, our render batches assume
+ * the previous GPU state is preserved, and only emit commands needed
+ * to incrementally change that state. In particular, we inherit the
+ * STATE_BASE_ADDRESS and PIPELINE_SELECT settings, which are critical.
+ * With default base addresses, our next batches will almost certainly
+ * cause more GPU hangs, leading to repeated hangs until we're banned
+ * or the machine is dead.
+ *
+ * Here we tell the kernel not to attempt to recover our context but
+ * immediately (on the next batchbuffer submission) report that the
+ * context is lost, and we will do the recovery ourselves. Ideally,
+ * we'll have two lost batches instead of a continual stream of hangs.
+ */
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = create.ctx_id,
+ .param = I915_CONTEXT_PARAM_RECOVERABLE,
+ .value = false,
+ };
+ gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
+
return create.ctx_id;
}
+static int
+iris_hw_context_get_priority(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
+{
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx_id,
+ .param = I915_CONTEXT_PARAM_PRIORITY,
+ };
+ gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
+ return p.value; /* on error, return 0 i.e. default priority */
+}
+
int
iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
uint32_t ctx_id,
int err;
err = 0;
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
err = -errno;
return err;
}
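+/**
+ * Create a new hardware context with the same scheduling priority as an
+ * existing one.  Returns 0 on failure.
+ */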
+uint32_t
+iris_clone_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
+{
+ uint32_t new_ctx = iris_create_hw_context(bufmgr);
+
+ if (new_ctx) {
+ int priority = iris_hw_context_get_priority(bufmgr, ctx_id);
+ iris_hw_context_set_priority(bufmgr, new_ctx, priority);
+ }
+
+ return new_ctx;
+}
+
void
iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
{
struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
if (ctx_id != 0 &&
- drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
+ gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
strerror(errno));
}
iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
{
struct drm_i915_reg_read reg_read = { .offset = offset };
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, ®_read);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, ®_read);
*result = reg_read.val;
return ret;
struct drm_i915_gem_context_param p = {
.param = I915_CONTEXT_PARAM_GTT_SIZE,
};
- if (!drm_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
+ if (!gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
return p.value;
return 0;
}
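+/* Callbacks the gen_aux_map code uses to allocate and free the pinned,
+ * CPU-mapped buffers that back the gen12+ auxiliary translation table.
+ */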
+static struct gen_buffer *
+gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
+{
+ struct gen_buffer *buf = malloc(sizeof(struct gen_buffer));
+ if (!buf)
+ return NULL;
+
+ struct iris_bufmgr *bufmgr = (struct iris_bufmgr *)driver_ctx;
+
+ struct iris_bo *bo =
+ iris_bo_alloc_tiled(bufmgr, "aux-map", size, 64 * 1024,
+ IRIS_MEMZONE_OTHER, I915_TILING_NONE, 0, 0);
+
+ if (!bo) {
+    free(buf);
+    return NULL;
+ }
+
+ buf->driver_bo = bo;
+ buf->gpu = bo->gtt_offset;
+ buf->gpu_end = buf->gpu + bo->size;
+ buf->map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
+ return buf;
+}
+
+static void
+gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
+{
+ iris_bo_unreference((struct iris_bo*)buffer->driver_bo);
+ free(buffer);
+}
+
+static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
+ .alloc = gen_aux_map_buffer_alloc,
+ .free = gen_aux_map_buffer_free,
+};
+
/**
* Initializes the GEM buffer manager, which uses the kernel to allocate, map,
* and manage buffer objects.
*
* \param fd File descriptor of the opened DRM device.
*/
-struct iris_bufmgr *
-iris_bufmgr_init(struct gen_device_info *devinfo, int fd)
+static struct iris_bufmgr *
+iris_bufmgr_create(struct gen_device_info *devinfo, int fd, bool bo_reuse)
{
uint64_t gtt_size = iris_gtt_size(fd);
if (gtt_size <= IRIS_MEMZONE_OTHER_START)
* Don't do this! Ensure that each library/bufmgr has its own device
* fd so that its namespace does not clash with another.
*/
- bufmgr->fd = fd;
+ bufmgr->fd = dup(fd);
+
+ p_atomic_set(&bufmgr->refcount, 1);
if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
+ close(bufmgr->fd);
free(bufmgr);
return NULL;
}
+ list_inithead(&bufmgr->zombie_list);
+
bufmgr->has_llc = devinfo->has_llc;
+ bufmgr->bo_reuse = bo_reuse;
STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
const uint64_t _4GB = 1ull << 32;
+ const uint64_t _2GB = 1ull << 31;
+
+ /* The STATE_BASE_ADDRESS size field can only hold 1 page shy of 4GB */
+ const uint64_t _4GB_minus_1 = _4GB - PAGE_SIZE;
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
- PAGE_SIZE, _4GB - PAGE_SIZE);
+ PAGE_SIZE, _4GB_minus_1 - PAGE_SIZE);
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
IRIS_MEMZONE_SURFACE_START,
- _4GB - IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
+ _4GB_minus_1 - IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
+ /* TODO: Why does limiting to 2GB help some state items on gen12?
+ * - CC Viewport Pointer
+ * - Blend State Pointer
+ * - Color Calc State Pointer
+ */
+ const uint64_t dynamic_pool_size =
+ (devinfo->gen >= 12 ? _2GB : _4GB_minus_1) - IRIS_BORDER_COLOR_POOL_SIZE;
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
IRIS_MEMZONE_DYNAMIC_START + IRIS_BORDER_COLOR_POOL_SIZE,
- _4GB - IRIS_BORDER_COLOR_POOL_SIZE);
+ dynamic_pool_size);
+
+ /* Leave the last 4GB out of the high vma range, so that no state
+ * base address + size can overflow 48 bits.
+ */
util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
IRIS_MEMZONE_OTHER_START,
- gtt_size - IRIS_MEMZONE_OTHER_START);
-
- // XXX: driconf
- bufmgr->bo_reuse = env_var_as_boolean("bo_reuse", true);
+ (gtt_size - _4GB) - IRIS_MEMZONE_OTHER_START);
init_cache_buckets(bufmgr);
bufmgr->name_table =
- _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
+ _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
bufmgr->handle_table =
- _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
+ _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
+
+ if (devinfo->gen >= 12) {
+ bufmgr->aux_map_ctx = gen_aux_map_init(bufmgr, &aux_map_allocator,
+ devinfo);
+ assert(bufmgr->aux_map_ctx);
+ }
+
+ return bufmgr;
+}
+
+static struct iris_bufmgr *
+iris_bufmgr_ref(struct iris_bufmgr *bufmgr)
+{
+ p_atomic_inc(&bufmgr->refcount);
+ return bufmgr;
+}
+
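+/**
+ * Drop a reference to the bufmgr.  When the last reference is dropped,
+ * the bufmgr is removed from the global list and destroyed.
+ */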
+void
+iris_bufmgr_unref(struct iris_bufmgr *bufmgr)
+{
+ mtx_lock(&global_bufmgr_list_mutex);
+ if (p_atomic_dec_zero(&bufmgr->refcount)) {
+ list_del(&bufmgr->link);
+ iris_bufmgr_destroy(bufmgr);
+ }
+ mtx_unlock(&global_bufmgr_list_mutex);
+}
+
+/**
+ * Gets an already existing GEM buffer manager or creates a new one.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ */
+struct iris_bufmgr *
+iris_bufmgr_get_for_fd(struct gen_device_info *devinfo, int fd, bool bo_reuse)
+{
+ struct stat st;
+
+ if (fstat(fd, &st))
+ return NULL;
+
+ struct iris_bufmgr *bufmgr = NULL;
+
+ mtx_lock(&global_bufmgr_list_mutex);
+ list_for_each_entry(struct iris_bufmgr, iter_bufmgr, &global_bufmgr_list, link) {
+ struct stat iter_st;
+ if (fstat(iter_bufmgr->fd, &iter_st))
+ continue;
+
+ if (st.st_rdev == iter_st.st_rdev) {
+ assert(iter_bufmgr->bo_reuse == bo_reuse);
+ bufmgr = iris_bufmgr_ref(iter_bufmgr);
+ goto unlock;
+ }
+ }
+
+ bufmgr = iris_bufmgr_create(devinfo, fd, bo_reuse);
+ if (bufmgr)
+    list_addtail(&bufmgr->link, &global_bufmgr_list);
+
+ unlock:
+ mtx_unlock(&global_bufmgr_list_mutex);
return bufmgr;
}
+
+int
+iris_bufmgr_get_fd(struct iris_bufmgr *bufmgr)
+{
+ return bufmgr->fd;
+}
+
+void*
+iris_bufmgr_get_aux_map_context(struct iris_bufmgr *bufmgr)
+{
+ return bufmgr->aux_map_ctx;
+}