* @file
* Slab pool implementation.
*
+ * @sa http://en.wikipedia.org/wiki/Slab_allocation
+ *
* @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* @author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#include "pipe/p_compiler.h"
-#include "pipe/p_error.h"
-#include "pipe/p_debug.h"
-#include "pipe/p_thread.h"
+#include "util/u_debug.h"
+#include "os/os_thread.h"
#include "pipe/p_defines.h"
-#include "pipe/p_util.h"
+#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"
struct pb_slab;
+
+/**
+ * Buffer in a slab.
+ *
+ * Sub-allocation of a contiguous buffer.
+ */
struct pb_slab_buffer
{
struct pb_buffer base;
struct pb_slab *slab;
+
struct list_head head;
+
unsigned mapCount;
- size_t start;
- _glthread_Cond event;
+
+ /** Offset relative to the start of the slab buffer. */
+ pb_size start;
+
+ /** Used when validating, to signal that all mappings are finished */
+ /* TODO: validation does not actually reach this stage yet */
+ pipe_condvar event;
};
+
+/**
+ * Slab -- a contiguous piece of memory.
+ */
struct pb_slab
{
struct list_head head;
struct list_head freeBuffers;
- size_t numBuffers;
- size_t numFree;
+ pb_size numBuffers;
+ pb_size numFree;
+
struct pb_slab_buffer *buffers;
struct pb_slab_manager *mgr;
+ /** Buffer from the provider */
struct pb_buffer *bo;
+
void *virtual;
};
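+
+/*
+ * Illustrative layout (example numbers, not from this change): a slab whose
+ * provider buffer is 4096 bytes with bufSize = 1024 holds numBuffers = 4
+ * sub-buffers; buffers[i].start is i * 1024, and mapping buffers[i] yields
+ * (uint8_t *)virtual + buffers[i].start.
+ */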
+
+/**
+ * Adds and removes slabs as needed to satisfy the allocation and
+ * destruction of individual buffers.
+ */
struct pb_slab_manager
{
struct pb_manager base;
+ /** From where we get our buffers */
struct pb_manager *provider;
- size_t bufSize;
- size_t slabSize;
+
+ /** Size of the buffers we hand out downstream */
+ pb_size bufSize;
+
+ /** Size of the buffers we request upstream */
+ pb_size slabSize;
+
+ /**
+ * Alignment and usage for allocating the slab buffers.
+ *
+ * We can only provide buffers which are consistent (in alignment, usage)
+ * with this description.
+ */
struct pb_desc desc;
+ /**
+ * Partial slabs
+ *
+ * Full slabs are not stored in any list. Empty slabs are destroyed
+ * immediately.
+ */
struct list_head slabs;
- struct list_head freeSlabs;
- _glthread_Mutex mutex;
+ pipe_mutex mutex;
};
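+
+/*
+ * Slab lifecycle, as implemented below: a new slab is linked into the slabs
+ * list; it is unlinked once its last free buffer is handed out, relinked
+ * when a buffer is released back to a full slab, and destroyed outright as
+ * soon as all of its buffers are free again.
+ */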
+
/**
+ * Wrapper around several slab managers, therefore capable of handling
+ * buffers of multiple sizes.
+ *
+ * This buffer manager simply dispatches buffer allocations to the
+ * appropriate slab manager according to the requested buffer size, or
+ * bypasses the slab managers altogether for even larger sizes.
+ *
* The data of this structure remains constant after
* initialization and thus needs no mutex protection.
*/
struct pb_manager base;
struct pb_manager *provider;
- size_t minBufSize;
- size_t maxBufSize;
+
+ pb_size minBufSize;
+ pb_size maxBufSize;
+
+ /** @sa pb_slab_manager::desc */
struct pb_desc desc;
unsigned numBuckets;
- size_t *bucketSizes;
+ pb_size *bucketSizes;
+
+ /** Array of pb_slab_manager, one for each bucket size */
struct pb_manager **buckets;
};
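+
+/*
+ * Worked example (illustrative values, not from this change): minBufSize =
+ * 64 and maxBufSize = 1024 give 5 buckets sized 64, 128, 256, 512 and 1024;
+ * a 200-byte request is served by the 256-byte slab manager, while a
+ * 4096-byte request bypasses the buckets and goes straight to the provider.
+ */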
struct pb_slab_manager *mgr = slab->mgr;
struct list_head *list = &buf->head;
- _glthread_LOCK_MUTEX(mgr->mutex);
+ pipe_mutex_lock(mgr->mutex);
- assert(buf->base.base.refcount == 0);
+ assert(!pipe_is_referenced(&buf->base.reference));
buf->mapCount = 0;
if (slab->head.next == &slab->head)
LIST_ADDTAIL(&slab->head, &mgr->slabs);
+ /* If the slab becomes totally empty, free it */
if (slab->numFree == slab->numBuffers) {
list = &slab->head;
- LIST_DEL(list);
- LIST_ADDTAIL(list, &mgr->freeSlabs);
+ LIST_DELINIT(list);
+ pb_reference(&slab->bo, NULL);
+ FREE(slab->buffers);
+ FREE(slab);
}
- if (mgr->slabs.next == &mgr->slabs || slab->numFree
- != slab->numBuffers) {
-
- struct list_head *next;
-
- for (list = mgr->freeSlabs.next, next = list->next; list
- != &mgr->freeSlabs; list = next, next = list->next) {
-
- slab = LIST_ENTRY(struct pb_slab, list, head);
-
- LIST_DELINIT(list);
- pb_reference(&slab->bo, NULL);
- FREE(slab->buffers);
- FREE(slab);
- }
- }
-
- _glthread_UNLOCK_MUTEX(mgr->mutex);
+ pipe_mutex_unlock(mgr->mutex);
}
static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
- unsigned flags)
+ unsigned flags,
+ void *flush_ctx)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ /* XXX: it will be necessary to remap here to propagate flush_ctx */
+
++buf->mapCount;
return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}
--buf->mapCount;
if (buf->mapCount == 0)
- _glthread_COND_BROADCAST(buf->event);
+ pipe_condvar_broadcast(buf->event);
+}
+
+
+static enum pipe_error
+pb_slab_buffer_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ return pb_validate(buf->slab->bo, vl, flags);
+}
+
+
+static void
+pb_slab_buffer_fence(struct pb_buffer *_buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ pb_fence(buf->slab->bo, fence);
}
static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
struct pb_buffer **base_buf,
- unsigned *offset)
+ pb_size *offset)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
pb_get_base_buffer(buf->slab->bo, base_buf, offset);
pb_slab_buffer_destroy,
pb_slab_buffer_map,
pb_slab_buffer_unmap,
+ pb_slab_buffer_validate,
+ pb_slab_buffer_fence,
pb_slab_buffer_get_base_buffer
};
+/**
+ * Create a new slab.
+ *
+ * Called when we have run out of slabs with free buffers.
+ */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
if (!slab)
return PIPE_ERROR_OUT_OF_MEMORY;
- /*
- * FIXME: We should perhaps allow some variation in slabsize in order
- * to efficiently reuse slabs.
- */
-
slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
if(!slab->bo) {
ret = PIPE_ERROR_OUT_OF_MEMORY;
goto out_err0;
}
+ /* Record the slab's virtual address. All mappings are accessed directly
+ * through this address, so the buffer must remain pinned. */
slab->virtual = pb_map(slab->bo,
- PIPE_BUFFER_USAGE_CPU_READ |
- PIPE_BUFFER_USAGE_CPU_WRITE);
+ PB_USAGE_CPU_READ |
+ PB_USAGE_CPU_WRITE, NULL);
if(!slab->virtual) {
ret = PIPE_ERROR_OUT_OF_MEMORY;
goto out_err1;
}
-
pb_unmap(slab->bo);
- numBuffers = slab->bo->base.size / mgr->bufSize;
+ numBuffers = slab->bo->size / mgr->bufSize;
slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
if (!slab->buffers) {
buf = slab->buffers;
for (i = 0; i < numBuffers; ++i) {
- buf->base.base.refcount = 0;
- buf->base.base.size = mgr->bufSize;
- buf->base.base.alignment = 0;
- buf->base.base.usage = 0;
+ pipe_reference_init(&buf->base.reference, 0);
+ buf->base.size = mgr->bufSize;
+ buf->base.alignment = 0;
+ buf->base.usage = 0;
buf->base.vtbl = &pb_slab_buffer_vtbl;
buf->slab = slab;
buf->start = i * mgr->bufSize;
buf->mapCount = 0;
- _glthread_INIT_COND(buf->event);
+ pipe_condvar_init(buf->event);
LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
slab->numFree++;
buf++;
}
+ /* Add this slab to the list of partial slabs */
LIST_ADDTAIL(&slab->head, &mgr->slabs);
return PIPE_OK;
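+
+/* For instance (illustrative numbers, not from this change): slabSize =
+ * 16384 and bufSize = 1024 yield numBuffers = 16 sub-buffers per slab, all
+ * backed by the single pinned provider buffer mapped above.
+ */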
static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
- size_t size,
+ pb_size size,
const struct pb_desc *desc)
{
struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
struct list_head *list;
/* check size */
- assert(size == mgr->bufSize);
- if(size != mgr->bufSize)
+ assert(size <= mgr->bufSize);
+ if(size > mgr->bufSize)
return NULL;
/* check if we can provide the requested alignment */
if(!pb_check_alignment(desc->alignment, mgr->bufSize))
return NULL;
- /* XXX: check for compatible buffer usage too? */
+ assert(pb_check_usage(desc->usage, mgr->desc.usage));
+ if(!pb_check_usage(desc->usage, mgr->desc.usage))
+ return NULL;
+
+ pipe_mutex_lock(mgr->mutex);
- _glthread_LOCK_MUTEX(mgr->mutex);
+ /* Create a new slab if we have run out of partial slabs */
if (mgr->slabs.next == &mgr->slabs) {
(void) pb_slab_create(mgr);
if (mgr->slabs.next == &mgr->slabs) {
- _glthread_UNLOCK_MUTEX(mgr->mutex);
+ pipe_mutex_unlock(mgr->mutex);
return NULL;
}
}
+
+ /* Allocate the buffer from a partial (or just created) slab */
list = mgr->slabs.next;
slab = LIST_ENTRY(struct pb_slab, list, head);
+
+ /* If the slab is now completely full, remove it from the partial slab list */
if (--slab->numFree == 0)
LIST_DELINIT(list);
list = slab->freeBuffers.next;
LIST_DELINIT(list);
- _glthread_UNLOCK_MUTEX(mgr->mutex);
+ pipe_mutex_unlock(mgr->mutex);
buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
- ++buf->base.base.refcount;
- buf->base.base.alignment = desc->alignment;
- buf->base.base.usage = desc->usage;
+ pipe_reference_init(&buf->base.reference, 1);
+ buf->base.alignment = desc->alignment;
+ buf->base.usage = desc->usage;
return &buf->base;
}
+static void
+pb_slab_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+
+ assert(mgr->provider->flush);
+ if(mgr->provider->flush)
+ mgr->provider->flush(mgr->provider);
+}
+
+
static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
- size_t bufSize,
- size_t slabSize,
+ pb_size bufSize,
+ pb_size slabSize,
const struct pb_desc *desc)
{
struct pb_slab_manager *mgr;
mgr->base.destroy = pb_slab_manager_destroy;
mgr->base.create_buffer = pb_slab_manager_create_buffer;
+ mgr->base.flush = pb_slab_manager_flush;
mgr->provider = provider;
mgr->bufSize = bufSize;
mgr->desc = *desc;
LIST_INITHEAD(&mgr->slabs);
- LIST_INITHEAD(&mgr->freeSlabs);
- _glthread_INIT_MUTEX(mgr->mutex);
+ pipe_mutex_init(mgr->mutex);
return &mgr->base;
}
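+
+/*
+ * Usage sketch (illustrative, not part of this change; "provider" stands
+ * for whatever pb_manager the winsys supplies):
+ *
+ *    struct pb_desc desc;
+ *    struct pb_manager *mgr;
+ *    struct pb_buffer *buf;
+ *
+ *    desc.alignment = 64;
+ *    desc.usage = PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE;
+ *
+ *    mgr = pb_slab_manager_create(provider, 1024, 16 * 1024, &desc);
+ *    buf = mgr->create_buffer(mgr, 1024, &desc);
+ *    ...
+ *    pb_reference(&buf, NULL);   releasing the last reference returns
+ *                                the buffer to its slab
+ *    mgr->destroy(mgr);
+ */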
static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
- size_t size,
+ pb_size size,
const struct pb_desc *desc)
{
struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
- size_t bufSize;
+ pb_size bufSize;
+ pb_size reqSize = size;
unsigned i;
+ if(desc->alignment > reqSize)
+ reqSize = desc->alignment;
+
bufSize = mgr->minBufSize;
for (i = 0; i < mgr->numBuckets; ++i) {
- if(bufSize >= size)
+ if(bufSize >= reqSize)
return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
bufSize *= 2;
}
}
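+
+/* Example of the alignment bump above (illustrative numbers): a request for
+ * size = 96 with desc->alignment = 256 searches the buckets with reqSize =
+ * 256, so the chosen slab's stride can honor the alignment even though only
+ * 96 bytes were requested.
+ */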
+static void
+pb_slab_range_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+
+ /* The individual slab managers don't hold any temporary buffers, so there is no need to flush them */
+
+ assert(mgr->provider->flush);
+ if(mgr->provider->flush)
+ mgr->provider->flush(mgr->provider);
+}
+
+
static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
- size_t minBufSize,
- size_t maxBufSize,
- size_t slabSize,
+ pb_size minBufSize,
+ pb_size maxBufSize,
+ pb_size slabSize,
const struct pb_desc *desc)
{
struct pb_slab_range_manager *mgr;
- size_t bufSize;
+ pb_size bufSize;
unsigned i;
+ if(!provider)
+ return NULL;
+
mgr = CALLOC_STRUCT(pb_slab_range_manager);
if (!mgr)
goto out_err0;
mgr->base.destroy = pb_slab_range_manager_destroy;
mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
+ mgr->base.flush = pb_slab_range_manager_flush;
mgr->provider = provider;
mgr->minBufSize = minBufSize;