/**************************************************************************
*
- * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright 2006-2008 VMware, Inc., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
*
* @sa http://en.wikipedia.org/wiki/Slab_allocation
*
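+ * Minimal usage sketch, assuming an upstream pb_manager `provider`
+ * obtained from the winsys; the sizes below are illustrative only:
+ *
+ *    struct pb_desc desc;
+ *    struct pb_manager *slab_mgr;
+ *    struct pb_buffer *buf;
+ *    void *data;
+ *
+ *    memset(&desc, 0, sizeof desc);
+ *    desc.alignment = 4096;
+ *    desc.usage = PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE;
+ *
+ *    slab_mgr = pb_slab_manager_create(provider, 64 * 1024,
+ *                                      1024 * 1024, &desc);
+ *    buf = slab_mgr->create_buffer(slab_mgr, 64 * 1024, &desc);
+ *    data = pb_map(buf, PB_USAGE_CPU_WRITE, NULL);
+ *    ...
+ *    pb_unmap(buf);
+ *    pb_reference(&buf, NULL);
+ *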
- * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ * @author Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ * @author Jose Fonseca <jfonseca@vmware.com>
*/
#include "pipe/p_compiler.h"
-#include "pipe/p_error.h"
#include "util/u_debug.h"
-#include "pipe/p_thread.h"
+#include "os/os_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
-#include "util/u_double_list.h"
+#include "util/list.h"
#include "util/u_time.h"
#include "pb_buffer.h"
unsigned mapCount;
/** Offset relative to the start of the slab buffer. */
- size_t start;
+ pb_size start;
/** Used during validation to signal that all mappings are finished */
/* TODO: Actually validation does not reach this stage yet */
- pipe_condvar event;
+ cnd_t event;
};
{
struct list_head head;
struct list_head freeBuffers;
- size_t numBuffers;
- size_t numFree;
+ pb_size numBuffers;
+ pb_size numFree;
struct pb_slab_buffer *buffers;
struct pb_slab_manager *mgr;
struct pb_manager *provider;
/** Size of the buffers we hand on downstream */
- size_t bufSize;
+ pb_size bufSize;
/** Size of the buffers we request upstream */
- size_t slabSize;
+ pb_size slabSize;
/**
* Alignment and usage flags used when allocating the slab buffers.
*/
struct list_head slabs;
- pipe_mutex mutex;
+ mtx_t mutex;
};
struct pb_manager *provider;
- size_t minBufSize;
- size_t maxBufSize;
+ pb_size minBufSize;
+ pb_size maxBufSize;
/** @sa pb_slab_manager::desc */
struct pb_desc desc;
unsigned numBuckets;
- size_t *bucketSizes;
+ pb_size *bucketSizes;
/** Array of pb_slab_manager, one for each bucket size */
struct pb_manager **buckets;
};
-static INLINE struct pb_slab_buffer *
+static inline struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
assert(buf);
}
-static INLINE struct pb_slab_manager *
+static inline struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
assert(mgr);
}
-static INLINE struct pb_slab_range_manager *
+static inline struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
assert(mgr);
struct pb_slab_manager *mgr = slab->mgr;
struct list_head *list = &buf->head;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
- assert(buf->base.base.reference.count == 0);
+ assert(!pipe_is_referenced(&buf->base.reference));
buf->mapCount = 0;
FREE(slab);
}
- pipe_mutex_unlock(mgr->mutex);
+ mtx_unlock(&mgr->mutex);
}
static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
- unsigned flags)
+ unsigned flags,
+ void *flush_ctx)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ /* XXX: it will be necessary to remap here to propagate flush_ctx */
+
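+ /* All maps return an offset into the slab virtual address noted at slab
+ * creation; that address stays valid because the underlying buffer is
+ * pinned, so mapping reduces to reference counting plus pointer
+ * arithmetic. */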
++buf->mapCount;
return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}
--buf->mapCount;
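+ /* Wake any thread waiting (during validation) for all mappings to be
+ * finished. */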
if (buf->mapCount == 0)
- pipe_condvar_broadcast(buf->event);
+ cnd_broadcast(&buf->event);
}
static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
struct pb_buffer **base_buf,
- unsigned *offset)
+ pb_size *offset)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
pb_get_base_buffer(buf->slab->bo, base_buf, offset);
/* Note down the slab virtual address. All mappings are accessed directly
* through this address, so the buffer must remain pinned. */
slab->virtual = pb_map(slab->bo,
- PIPE_BUFFER_USAGE_CPU_READ |
- PIPE_BUFFER_USAGE_CPU_WRITE);
+ PB_USAGE_CPU_READ |
+ PB_USAGE_CPU_WRITE, NULL);
if(!slab->virtual) {
ret = PIPE_ERROR_OUT_OF_MEMORY;
goto out_err1;
}
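+
+ /* Unmapping right away is safe: the address noted above remains valid
+ * for as long as the buffer stays pinned. */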
pb_unmap(slab->bo);
- numBuffers = slab->bo->base.size / mgr->bufSize;
+ numBuffers = slab->bo->size / mgr->bufSize;
slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
if (!slab->buffers) {
buf = slab->buffers;
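+ /* Carve the slab into equally sized sub-buffers. Each starts with a
+ * zero reference count; pb_slab_manager_create_buffer() takes the first
+ * reference when it hands a buffer out. */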
for (i=0; i < numBuffers; ++i) {
- pipe_reference_init(&buf->base.base.reference, 0);
- buf->base.base.size = mgr->bufSize;
- buf->base.base.alignment = 0;
- buf->base.base.usage = 0;
+ pipe_reference_init(&buf->base.reference, 0);
+ buf->base.size = mgr->bufSize;
+ buf->base.alignment = 0;
+ buf->base.usage = 0;
buf->base.vtbl = &pb_slab_buffer_vtbl;
buf->slab = slab;
buf->start = i* mgr->bufSize;
buf->mapCount = 0;
- pipe_condvar_init(buf->event);
+ cnd_init(&buf->event);
LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
slab->numFree++;
buf++;
static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
- size_t size,
+ pb_size size,
const struct pb_desc *desc)
{
struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
if(!pb_check_usage(desc->usage, mgr->desc.usage))
return NULL;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
/* Create a new slab if we have run out of partial slabs */
if (mgr->slabs.next == &mgr->slabs) {
(void) pb_slab_create(mgr);
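+ /* pb_slab_create() links any new slab into mgr->slabs, so an empty
+ * list here means the allocation failed. */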
if (mgr->slabs.next == &mgr->slabs) {
- pipe_mutex_unlock(mgr->mutex);
+ mtx_unlock(&mgr->mutex);
return NULL;
}
}
list = slab->freeBuffers.next;
LIST_DELINIT(list);
- pipe_mutex_unlock(mgr->mutex);
+ mtx_unlock(&mgr->mutex);
buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
- pipe_reference_init(&buf->base.base.reference, 1);
- buf->base.base.alignment = desc->alignment;
- buf->base.base.usage = desc->usage;
+ pipe_reference_init(&buf->base.reference, 1);
+ buf->base.alignment = desc->alignment;
+ buf->base.usage = desc->usage;
return &buf->base;
}
struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
- size_t bufSize,
- size_t slabSize,
+ pb_size bufSize,
+ pb_size slabSize,
const struct pb_desc *desc)
{
struct pb_slab_manager *mgr;
LIST_INITHEAD(&mgr->slabs);
- pipe_mutex_init(mgr->mutex);
+ (void) mtx_init(&mgr->mutex, mtx_plain);
return &mgr->base;
}
static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
- size_t size,
+ pb_size size,
const struct pb_desc *desc)
{
struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
- size_t bufSize;
+ pb_size bufSize;
+ pb_size reqSize = size;
unsigned i;
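+
+ /* Fold the alignment requirement into the size: sub-buffers start at
+ * multiples of bufSize within a slab, so a bucket whose bufSize is at
+ * least the requested alignment can satisfy it. */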
+ if (desc->alignment > reqSize)
+ reqSize = desc->alignment;
+
bufSize = mgr->minBufSize;
for (i = 0; i < mgr->numBuckets; ++i) {
- if(bufSize >= size)
+ if (bufSize >= reqSize)
return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
bufSize *= 2;
}
struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
- size_t minBufSize,
- size_t maxBufSize,
- size_t slabSize,
+ pb_size minBufSize,
+ pb_size maxBufSize,
+ pb_size slabSize,
const struct pb_desc *desc)
{
struct pb_slab_range_manager *mgr;
- size_t bufSize;
+ pb_size bufSize;
unsigned i;
- if(!provider)
+ if (!provider)
return NULL;
mgr = CALLOC_STRUCT(pb_slab_range_manager);