#include <time.h>
#include "errno.h"
-#ifndef ETIME
-#define ETIME ETIMEDOUT
-#endif
#include "common/gen_clflush.h"
#include "dev/gen_debug.h"
#include "common/gen_gem.h"
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
-/**
- * Call ioctl, restarting if it is interrupted
- */
-int
-drm_ioctl(int fd, unsigned long request, void *arg)
-{
- int ret;
-
- do {
- ret = ioctl(fd, request, arg);
- } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
- return ret;
-}
-
static inline int
atomic_add_unless(int *v, int add, int unless)
{
struct hash_table *name_table;
struct hash_table *handle_table;
+ /**
+ * List of BOs which we've effectively freed, but are hanging on to
+ * until they're idle before closing and returning the VMA.
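+ *
+ * (bo_free() moves still-busy BOs here; cleanup_bo_cache() and bufmgr
+ * teardown close them once they become idle.)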
+ */
+ struct list_head zombie_list;
+
struct util_vma_heap vma_allocator[IRIS_MEMZONE_COUNT];
bool has_llc:1;
}
static struct iris_bo *
-hash_find_bo(struct hash_table *ht, unsigned int key)
+find_and_ref_external_bo(struct hash_table *ht, unsigned int key)
{
struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
- return entry ? (struct iris_bo *) entry->data : NULL;
+ struct iris_bo *bo = entry ? entry->data : NULL;
+
+ if (bo) {
+ assert(bo->external);
+ assert(!bo->reusable);
+
+ /* Being non-reusable, the BO cannot be in the cache lists. It may,
+ * however, be in the zombie list if it had reached zero references but
+ * we hadn't yet closed it, and was then reimported. If so, remove it
+ * from that list, since it's now been resurrected.
+ */
+ if (bo->head.prev || bo->head.next)
+ list_del(&bo->head);
+
+ iris_bo_reference(bo);
+ }
+
+ return bo;
}
/**
struct iris_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
if (ret == 0) {
bo->idle = !busy.busy;
return busy.busy;
.retained = 1,
};
- drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+ gen_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
return madv.retained;
}
-/* drop the oldest entries that have been purged by the kernel */
-static void
-iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
- struct bo_cache_bucket *bucket)
-{
- list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
- if (iris_bo_madvise(bo, I915_MADV_DONTNEED))
- break;
-
- list_del(&bo->head);
- bo_free(bo);
- }
-}
-
static struct iris_bo *
bo_calloc(void)
{
return bo;
}
+static struct iris_bo *
+alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
+ struct bo_cache_bucket *bucket,
+ uint32_t alignment,
+ enum iris_memory_zone memzone,
+ unsigned flags,
+ bool match_zone)
+{
+ if (!bucket)
+ return NULL;
+
+ struct iris_bo *bo = NULL;
+
+ list_for_each_entry_safe(struct iris_bo, cur, &bucket->head, head) {
+ /* Try a little harder to find one that's already in the right memzone */
+ if (match_zone && memzone != iris_memzone_for_address(cur->gtt_offset))
+ continue;
+
+ /* Cached BOs are kept in the order they were freed, so if this one is
+ * still busy, the rest were freed more recently and will likely be busy
+ * too. Bail, either falling back to a non-matching memzone, or if that
+ * fails, allocating a fresh buffer.
+ */
+ if (iris_bo_busy(cur))
+ return NULL;
+
+ list_del(&cur->head);
+
+ /* Tell the kernel we need this BO. If it still exists, we're done! */
+ if (iris_bo_madvise(cur, I915_MADV_WILLNEED)) {
+ bo = cur;
+ break;
+ }
+
+ /* This BO was purged, throw it out and keep looking. */
+ bo_free(cur);
+ }
+
+ if (!bo)
+ return NULL;
+
+ /* If the cached BO isn't in the right memory zone, or the alignment
+ * isn't sufficient, free the old memory and assign it a new address.
+ */
+ if (memzone != iris_memzone_for_address(bo->gtt_offset) ||
+ bo->gtt_offset % alignment != 0) {
+ vma_free(bufmgr, bo->gtt_offset, bo->size);
+ bo->gtt_offset = 0ull;
+ }
+
+ /* Zero the contents if necessary. If this fails, fall back to
+ * allocating a fresh BO, which will always be zeroed by the kernel.
+ */
+ if (flags & BO_ALLOC_ZEROED) {
+ void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
+ if (map) {
+ memset(map, 0, bo->size);
+ } else {
+ bo_free(bo);
+ return NULL;
+ }
+ }
+
+ return bo;
+}
+
static struct iris_bo *
alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size)
{
/* All new BOs we get from the kernel are zeroed, so we don't need to
* worry about that here.
*/
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0) {
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0) {
free(bo);
return NULL;
}
bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
bo->stride = 0;
+ /* Calling set_domain() will allocate pages for the BO outside of the
+ * struct mutex lock in the kernel, which is more efficient than waiting
+ * to create them during the first execbuf that uses the BO.
+ */
+ struct drm_i915_gem_set_domain sd = {
+ .handle = bo->gem_handle,
+ .read_domains = I915_GEM_DOMAIN_CPU,
+ .write_domain = 0,
+ };
+
+ if (gen_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
+ bo_free(bo);
+ return NULL;
+ }
+
return bo;
}
bo_alloc_internal(struct iris_bufmgr *bufmgr,
const char *name,
uint64_t size,
+ uint32_t alignment,
enum iris_memory_zone memzone,
unsigned flags,
uint32_t tiling_mode,
{
struct iris_bo *bo;
unsigned int page_size = getpagesize();
- struct bo_cache_bucket *bucket;
- bool alloc_from_cache;
- uint64_t bo_size;
- bool zeroed = false;
- bool alloc_pages = false;
-
- if (flags & BO_ALLOC_ZEROED)
- zeroed = true;
-
- /* Round the allocated size up to a power of two number of pages. */
- bucket = bucket_for_size(bufmgr, size);
+ struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);
- /* If we don't have caching at this size, don't actually round the
- * allocation up.
+ /* Round the size up to the bucket size, or if we don't have caching
+ * at this size, a multiple of the page size.
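+ * (For instance, with 4 KiB pages and no matching bucket, a 5000 byte
+ * request would be padded to 8192 bytes.)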
*/
- if (bucket == NULL) {
- bo_size = MAX2(ALIGN(size, page_size), page_size);
- } else {
- bo_size = bucket->size;
- }
+ uint64_t bo_size =
+ bucket ? bucket->size : MAX2(ALIGN(size, page_size), page_size);
mtx_lock(&bufmgr->lock);
- /* Get a buffer out of the cache if available */
-retry:
- alloc_from_cache = false;
- if (bucket != NULL && !list_empty(&bucket->head)) {
- /* If the last BO in the cache is idle, then reuse it. Otherwise,
- * allocate a fresh buffer to avoid stalling.
- */
- bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
- if (!iris_bo_busy(bo)) {
- alloc_from_cache = true;
- list_del(&bo->head);
- }
- if (alloc_from_cache) {
- if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
- bo_free(bo);
- iris_bo_cache_purge_bucket(bufmgr, bucket);
- goto retry;
- }
-
- if (zeroed) {
- void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
- if (!map) {
- bo_free(bo);
- goto retry;
- }
- memset(map, 0, bo_size);
- }
- }
+ /* Get a buffer out of the cache if available. First, we try to find
+ * one with a matching memory zone so we can avoid reallocating VMA.
+ */
+ bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, flags, true);
+
+ /* If that fails, we try for any cached BO, without matching memzone. */
+ if (!bo) {
+ bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, flags,
+ false);
}
- if (alloc_from_cache) {
- /* If the cached BO isn't in the right memory zone, free the old
- * memory and assign it a new address.
- */
- if (memzone != iris_memzone_for_address(bo->gtt_offset)) {
- vma_free(bufmgr, bo->gtt_offset, bo->size);
- bo->gtt_offset = 0ull;
- }
- } else {
- alloc_pages = true;
+ mtx_unlock(&bufmgr->lock);
+
+ if (!bo) {
bo = alloc_fresh_bo(bufmgr, bo_size);
if (!bo)
- goto err;
+ return NULL;
+ }
+
+ if (bo->gtt_offset == 0ull) {
+ mtx_lock(&bufmgr->lock);
+ bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, alignment);
+ mtx_unlock(&bufmgr->lock);
+
+ if (bo->gtt_offset == 0ull)
+ goto err_free;
}
+ if (bo_set_tiling_internal(bo, tiling_mode, stride))
+ goto err_free;
+
bo->name = name;
p_atomic_set(&bo->refcount, 1);
bo->reusable = bucket && bufmgr->bo_reuse;
if (memzone < IRIS_MEMZONE_OTHER)
bo->kflags |= EXEC_OBJECT_CAPTURE;
- if (bo->gtt_offset == 0ull) {
- bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);
-
- if (bo->gtt_offset == 0ull)
- goto err_free;
- }
-
- if (bo_set_tiling_internal(bo, tiling_mode, stride))
- goto err_free;
-
- if (alloc_pages) {
- /* Calling set_domain() will allocate pages for the BO outside of the
- * struct mutex lock in the kernel, which is more efficient than waiting
- * to create them during the first execbuf that uses the BO.
- */
- struct drm_i915_gem_set_domain sd = {
- .handle = bo->gem_handle,
- .read_domains = I915_GEM_DOMAIN_CPU,
- .write_domain = 0,
- };
-
- if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
- goto err_free;
- }
-
- mtx_unlock(&bufmgr->lock);
-
if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
struct drm_i915_gem_caching arg = {
.handle = bo->gem_handle,
.caching = 1,
};
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
bo->cache_coherent = true;
bo->reusable = false;
}
err_free:
bo_free(bo);
-err:
- mtx_unlock(&bufmgr->lock);
return NULL;
}
uint64_t size,
enum iris_memory_zone memzone)
{
- return bo_alloc_internal(bufmgr, name, size, memzone,
+ return bo_alloc_internal(bufmgr, name, size, 1, memzone,
0, I915_TILING_NONE, 0);
}
struct iris_bo *
iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
- uint64_t size, enum iris_memory_zone memzone,
+ uint64_t size, uint32_t alignment,
+ enum iris_memory_zone memzone,
uint32_t tiling_mode, uint32_t pitch, unsigned flags)
{
- return bo_alloc_internal(bufmgr, name, size, memzone,
+ return bo_alloc_internal(bufmgr, name, size, alignment, memzone,
flags, tiling_mode, pitch);
}
.user_ptr = (uintptr_t)ptr,
.user_size = size,
};
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
goto err_free;
bo->gem_handle = arg.handle;
.handle = bo->gem_handle,
.read_domains = I915_GEM_DOMAIN_CPU,
};
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
goto err_close;
bo->name = name;
return bo;
err_close:
- drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &bo->gem_handle);
+ gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &bo->gem_handle);
err_free:
free(bo);
return NULL;
* provides a sufficiently fast match.
*/
mtx_lock(&bufmgr->lock);
- bo = hash_find_bo(bufmgr->name_table, handle);
- if (bo) {
- iris_bo_reference(bo);
+ bo = find_and_ref_external_bo(bufmgr->name_table, handle);
+ if (bo)
goto out;
- }
struct drm_gem_open open_arg = { .name = handle };
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
if (ret != 0) {
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
* object from the kernel before by looking through the list
* again for a matching gem_handle
*/
- bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
- if (bo) {
- iris_bo_reference(bo);
+ bo = find_and_ref_external_bo(bufmgr->handle_table, open_arg.handle);
+ if (bo)
goto out;
- }
bo = bo_calloc();
if (!bo)
_mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
- ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
+ ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
if (ret != 0)
goto err_unref;
}
static void
-bo_free(struct iris_bo *bo)
+bo_close(struct iris_bo *bo)
{
struct iris_bufmgr *bufmgr = bo->bufmgr;
- if (bo->map_cpu && !bo->userptr) {
- VG_NOACCESS(bo->map_cpu, bo->size);
- munmap(bo->map_cpu, bo->size);
- }
- if (bo->map_wc) {
- VG_NOACCESS(bo->map_wc, bo->size);
- munmap(bo->map_wc, bo->size);
- }
- if (bo->map_gtt) {
- VG_NOACCESS(bo->map_gtt, bo->size);
- munmap(bo->map_gtt, bo->size);
- }
-
if (bo->external) {
struct hash_entry *entry;
/* Close this object */
struct drm_gem_close close = { .handle = bo->gem_handle };
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
if (ret != 0) {
DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
bo->gem_handle, bo->name, strerror(errno));
}
+ /* Return the VMA for reuse */
vma_free(bo->bufmgr, bo->gtt_offset, bo->size);
free(bo);
}
+static void
+bo_free(struct iris_bo *bo)
+{
+ struct iris_bufmgr *bufmgr = bo->bufmgr;
+
+ if (bo->map_cpu && !bo->userptr) {
+ VG_NOACCESS(bo->map_cpu, bo->size);
+ munmap(bo->map_cpu, bo->size);
+ }
+ if (bo->map_wc) {
+ VG_NOACCESS(bo->map_wc, bo->size);
+ munmap(bo->map_wc, bo->size);
+ }
+ if (bo->map_gtt) {
+ VG_NOACCESS(bo->map_gtt, bo->size);
+ munmap(bo->map_gtt, bo->size);
+ }
+
+ if (bo->idle) {
+ bo_close(bo);
+ } else {
+ /* Defer closing the GEM BO and returning the VMA for reuse until the
+ * BO is idle. Just move it to the zombie list for now.
+ */
+ list_addtail(&bo->head, &bufmgr->zombie_list);
+ }
+}
+
/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
}
}
+ list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
+ /* Stop once we reach a busy BO - all others past this point were
+ * freed more recently so are likely also busy.
+ */
+ if (!bo->idle && iris_bo_busy(bo))
+ break;
+
+ list_del(&bo->head);
+ bo_close(bo);
+ }
+
bufmgr->time = time;
}
.handle = bo->gem_handle,
.size = bo->size,
};
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
.size = bo->size,
.flags = I915_MMAP_WC,
};
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };
/* Get the fake offset back... */
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
if (ret != 0) {
DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
.bo_handle = bo->gem_handle,
.timeout_ns = timeout_ns,
};
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
if (ret != 0)
return -errno;
}
}
+ /* Close any buffer objects still on the zombie list. */
+ list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
+ list_del(&bo->head);
+ bo_close(bo);
+ }
+
_mesa_hash_table_destroy(bufmgr->name_table, NULL);
_mesa_hash_table_destroy(bufmgr->handle_table, NULL);
* for named buffers, we must not create two bo's pointing at the same
* kernel object
*/
- bo = hash_find_bo(bufmgr->handle_table, handle);
- if (bo) {
- iris_bo_reference(bo);
+ bo = find_and_ref_external_bo(bufmgr->handle_table, handle);
+ if (bo)
goto out;
- }
bo = bo_calloc();
if (!bo)
bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
goto err;
bo->tiling_mode = get_tiling.tiling_mode;
if (!bo->global_name) {
struct drm_gem_flink flink = { .handle = bo->gem_handle };
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
return -errno;
mtx_lock(&bufmgr->lock);
iris_create_hw_context(struct iris_bufmgr *bufmgr)
{
struct drm_i915_gem_context_create create = { };
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret != 0) {
DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
return 0;
int err;
err = 0;
- if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
+ if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
err = -errno;
return err;
struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
if (ctx_id != 0 &&
- drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
+ gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
strerror(errno));
}
iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
{
struct drm_i915_reg_read reg_read = { .offset = offset };
- int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
*result = reg_read.val;
return ret;
struct drm_i915_gem_context_param p = {
.param = I915_CONTEXT_PARAM_GTT_SIZE,
};
- if (!drm_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
+ if (!gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
return p.value;
return 0;
* \param fd File descriptor of the opened DRM device.
*/
struct iris_bufmgr *
-iris_bufmgr_init(struct gen_device_info *devinfo, int fd)
+iris_bufmgr_init(struct gen_device_info *devinfo, int fd, bool bo_reuse)
{
uint64_t gtt_size = iris_gtt_size(fd);
if (gtt_size <= IRIS_MEMZONE_OTHER_START)
return NULL;
}
+ list_inithead(&bufmgr->zombie_list);
+
bufmgr->has_llc = devinfo->has_llc;
+ bufmgr->bo_reuse = bo_reuse;
STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
const uint64_t _4GB = 1ull << 32;
IRIS_MEMZONE_OTHER_START,
(gtt_size - _4GB) - IRIS_MEMZONE_OTHER_START);
- // XXX: driconf
- bufmgr->bo_reuse = env_var_as_boolean("bo_reuse", true);
-
init_cache_buckets(bufmgr);
bufmgr->name_table =