* There is no obligation for a winsys driver to use this library, and a pipe
* driver should be completely agnostic about it.
*
- * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
*/
#ifndef PB_BUFFER_H_
#include "pipe/p_compiler.h"
#include "pipe/p_debug.h"
-#include "pipe/p_error.h"
#include "pipe/p_state.h"
#include "pipe/p_inlines.h"
struct pb_vtbl;
-struct pb_validate;
-
/**
* Buffer description.
void (*unmap)( struct pb_buffer *buf );
- enum pipe_error (*validate)( struct pb_buffer *buf,
- struct pb_validate *vl,
- unsigned flags );
-
- void (*fence)( struct pb_buffer *buf,
- struct pipe_fence_handle *fence );
-
/**
* Get the base buffer and the offset.
*
void (*get_base_buffer)( struct pb_buffer *buf,
struct pb_buffer **base_buf,
unsigned *offset );
-
};
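With validate and fence dropped from the table, an implementation only has to
supply destroy, map, unmap and get_base_buffer. A minimal sketch of a
conforming malloc-backed implementation, assuming the usual pb_vtbl layout;
every my_* identifier is hypothetical, not part of this library:

/* Hedged sketch: a trivial pb_buffer implementation after this change. */
struct my_buffer
{
   struct pb_buffer base;
   void *data;
};

static void
my_buffer_destroy(struct pb_buffer *buf)
{
   struct my_buffer *my_buf = (struct my_buffer *)buf;
   FREE(my_buf->data);
   FREE(my_buf);
}

static void *
my_buffer_map(struct pb_buffer *buf, unsigned flags)
{
   (void)flags;   /* A malloc-backed buffer is always mappable. */
   return ((struct my_buffer *)buf)->data;
}

static void
my_buffer_unmap(struct pb_buffer *buf)
{
   (void)buf;   /* Nothing to do. */
}

static void
my_buffer_get_base_buffer(struct pb_buffer *buf,
                          struct pb_buffer **base_buf,
                          unsigned *offset)
{
   *base_buf = buf;   /* This buffer is its own storage. */
   *offset = 0;
}

static const struct pb_vtbl my_buffer_vtbl = {
   my_buffer_destroy,
   my_buffer_map,
   my_buffer_unmap,
   my_buffer_get_base_buffer
};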
offset = 0;
return;
}
- assert(buf->vtbl->get_base_buffer);
buf->vtbl->get_base_buffer(buf, base_buf, offset);
}
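Drivers typically call pb_get_base_buffer when emitting relocations, since a
sub-allocated buffer lives at an offset inside a larger one. A hedged sketch;
emit_reloc and batch are hypothetical driver-side names:

struct pb_buffer *base;
unsigned offset;

pb_get_base_buffer(buf, &base, &offset);
/* The hardware address is that of the base buffer plus the offset. */
emit_reloc(batch, base, offset);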
-static INLINE enum pipe_error
-pb_validate(struct pb_buffer *buf, struct pb_validate *vl, unsigned flags)
-{
- assert(buf);
- if(!buf)
- return PIPE_ERROR;
- assert(buf->vtbl->validate);
- return buf->vtbl->validate(buf, vl, flags);
-}
-
-
-static INLINE void
-pb_fence(struct pb_buffer *buf, struct pipe_fence_handle *fence)
-{
- assert(buf);
- if(!buf)
- return;
- assert(buf->vtbl->fence);
- buf->vtbl->fence(buf, fence);
-}
-
-
static INLINE void
pb_destroy(struct pb_buffer *buf)
{
* \file
* Implementation of fenced buffers.
*
- * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
* \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
*/
#define SUPER(__derived) (&(__derived)->base)
+#define PIPE_BUFFER_USAGE_CPU_READ_WRITE \
+ ( PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE )
+#define PIPE_BUFFER_USAGE_GPU_READ_WRITE \
+ ( PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE )
+#define PIPE_BUFFER_USAGE_WRITE \
+ ( PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_GPU_WRITE )
+
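These combined masks keep the read/write conflict tests compact. For
instance, a hypothetical helper deciding whether a CPU map must wait for the
GPU could be written as:

/* Hedged sketch, not part of this library: a write on either side
 * conflicts with any pending GPU access. */
static INLINE boolean
access_conflicts(unsigned pending, unsigned requested)
{
   return ((pending | requested) & PIPE_BUFFER_USAGE_WRITE) &&
          (pending & PIPE_BUFFER_USAGE_GPU_READ_WRITE) ? TRUE : FALSE;
}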
struct fenced_buffer_list
{
unsigned flags;
unsigned mapcount;
- struct pb_validate *vl;
- unsigned validation_flags;
struct pipe_fence_handle *fence;
struct list_head head;
fenced_buffer(struct pb_buffer *buf)
{
assert(buf);
+ assert(buf->vtbl == &fenced_buffer_vtbl);
return (struct fenced_buffer *)buf;
}
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
void *map;
- assert(flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE);
assert(!(flags & ~PIPE_BUFFER_USAGE_CPU_READ_WRITE));
flags &= PIPE_BUFFER_USAGE_CPU_READ_WRITE;
}
-static enum pipe_error
-fenced_buffer_validate(struct pb_buffer *buf,
- struct pb_validate *vl,
- unsigned flags)
-{
- struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- enum pipe_error ret;
-
- if(!vl) {
- /* invalidate */
- fenced_buf->vl = NULL;
- fenced_buf->validation_flags = 0;
- return PIPE_OK;
- }
-
- assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
- assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
- flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
-
- /* Buffer cannot be validated in two different lists */
- if(fenced_buf->vl && fenced_buf->vl != vl)
- return PIPE_ERROR_RETRY;
-
- /* Do not validate if buffer is still mapped */
- if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
- /* TODO: wait for the thread that mapped the buffer to unmap it */
- return PIPE_ERROR_RETRY;
- }
-
- /* Allow concurrent GPU reads, but serialize GPU writes */
- if(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE) {
- if((fenced_buf->flags | flags) & PIPE_BUFFER_USAGE_GPU_WRITE) {
- _fenced_buffer_finish(fenced_buf);
- }
- }
-
- if(fenced_buf->vl == vl &&
- (fenced_buf->validation_flags & flags) == flags) {
- /* Nothing to do -- buffer already validated */
- return PIPE_OK;
- }
-
- /* Final sanity checking */
- assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
- assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE));
- assert(!fenced_buf->mapcount);
-
- ret = pb_validate(fenced_buf->buffer, vl, flags);
- if (ret != PIPE_OK)
- return ret;
-
- fenced_buf->vl = vl;
- fenced_buf->validation_flags |= flags;
-
- return PIPE_OK;
-}
-
-
-static void
-fenced_buffer_fence(struct pb_buffer *buf,
- struct pipe_fence_handle *fence)
-{
- struct fenced_buffer *fenced_buf;
- struct fenced_buffer_list *fenced_list;
- struct pipe_winsys *winsys;
-
- fenced_buf = fenced_buffer(buf);
- fenced_list = fenced_buf->list;
- winsys = fenced_list->winsys;
-
- if(fence == fenced_buf->fence) {
- /* Nothing to do */
- return;
- }
-
- assert(fenced_buf->vl);
- assert(fenced_buf->validation_flags);
-
- pipe_mutex_lock(fenced_list->mutex);
- if (fenced_buf->fence)
- _fenced_buffer_remove(fenced_list, fenced_buf);
- if (fence) {
- winsys->fence_reference(winsys, &fenced_buf->fence, fence);
- fenced_buf->flags |= fenced_buf->validation_flags;
- _fenced_buffer_add(fenced_buf);
- }
- pipe_mutex_unlock(fenced_list->mutex);
-
- pb_fence(fenced_buf->buffer, fence);
-
- fenced_buf->vl = NULL;
- fenced_buf->validation_flags = 0;
-}
-
-
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
}
-static const struct pb_vtbl
+const struct pb_vtbl
fenced_buffer_vtbl = {
fenced_buffer_destroy,
fenced_buffer_map,
fenced_buffer_unmap,
- fenced_buffer_validate,
- fenced_buffer_fence,
fenced_buffer_get_base_buffer
};
}
+void
+buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ struct fenced_buffer *fenced_buf;
+ struct fenced_buffer_list *fenced_list;
+ struct pipe_winsys *winsys;
+ /* FIXME: receive this as a parameter */
+ unsigned flags = fence ? PIPE_BUFFER_USAGE_GPU_READ_WRITE : 0;
+
+ /* This is a public function, so be extra cautious with the buffer passed:
+ * it is common to receive null buffers, or pointers to buffers other than
+ * fenced buffers. */
+ assert(buf);
+ if(!buf)
+ return;
+ assert(buf->vtbl == &fenced_buffer_vtbl);
+ if(buf->vtbl != &fenced_buffer_vtbl)
+ return;
+
+ fenced_buf = fenced_buffer(buf);
+ fenced_list = fenced_buf->list;
+ winsys = fenced_list->winsys;
+
+ if(!fence || fence == fenced_buf->fence) {
+ /* Handle the same fence case specially, not only because it is a fast
+ * path, but mostly to avoid serializing two writes with the same fence,
+ * as that would bring the hardware down to synchronous operation without
+ * any benefit.
+ */
+ fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+ return;
+ }
+
+ pipe_mutex_lock(fenced_list->mutex);
+ if (fenced_buf->fence)
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ if (fence) {
+ winsys->fence_reference(winsys, &fenced_buf->fence, fence);
+ fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+ _fenced_buffer_add(fenced_buf);
+ }
+ pipe_mutex_unlock(fenced_list->mutex);
+}
+
+
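A typical call site is the winsys batch flush: every buffer referenced by the
batch gets the batch's fence, and buffers referenced more than once hit the
same-fence fast path on every call after the first. A hedged sketch, where
flush_batch is a hypothetical driver hook returning the batch's fence:

struct pipe_fence_handle *fence = flush_batch(winsys, batch);
unsigned i;

for (i = 0; i < num_relocs; ++i)
   buffer_fence(reloc_bufs[i], fence);

winsys->fence_reference(winsys, &fence, NULL);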
struct fenced_buffer_list *
fenced_buffer_list_create(struct pipe_winsys *winsys)
{
* Between the handle's destruction and the fence signalling, the buffer is
* stored in a fenced buffer list.
*
- * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
*/
#ifndef PB_BUFFER_FENCED_H_
struct fenced_buffer_list;
+/**
+ * The fenced buffer's virtual function table.
+ *
+ * NOTE: Made public for debugging purposes.
+ */
+extern const struct pb_vtbl fenced_buffer_vtbl;
+
+
/**
* Create a fenced buffer list.
*
struct pb_buffer *buffer);
+/**
+ * Set a buffer's fence.
+ *
+ * NOTE: Although it takes a generic pb_buffer argument, it will quietly do
+ * nothing for anything but buffers returned by fenced_buffer_create.
+ */
+void
+buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence);
+
+
#ifdef __cplusplus
}
#endif
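Putting it together, a winsys would create one fenced buffer list at startup
and wrap every buffer it hands to the pipe driver. A hedged sketch: the
fenced_buffer_create signature is assumed from its mention above, and
my_winsys_buffer_create is hypothetical:

struct fenced_buffer_list *fenced_list;
struct pb_buffer *buf;

fenced_list = fenced_buffer_list_create(winsys);

/* Wrapping defers destruction until any fence attached with
 * buffer_fence() has signalled. */
buf = my_winsys_buffer_create(winsys, size);
buf = fenced_buffer_create(fenced_list, buf);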
}
-static enum pipe_error
-malloc_buffer_validate(struct pb_buffer *buf,
- struct pb_validate *vl,
- unsigned flags)
-{
- assert(0);
- return PIPE_ERROR;
-}
-
-
-static void
-malloc_buffer_fence(struct pb_buffer *buf,
- struct pipe_fence_handle *fence)
-{
- assert(0);
-}
-
-
static void
malloc_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
malloc_buffer_destroy,
malloc_buffer_map,
malloc_buffer_unmap,
- malloc_buffer_validate,
- malloc_buffer_fence,
malloc_buffer_get_base_buffer
};
* - the fenced buffer manager, which will delay buffer destruction until the
* moment the card finishes processing it.
*
- * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
*/
#ifndef PB_BUFMGR_H_
* \file
* Buffer cache.
*
- * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
* \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
}
-static enum pipe_error
-pb_cache_buffer_validate(struct pb_buffer *_buf,
- struct pb_validate *vl,
- unsigned flags)
-{
- struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
- return pb_validate(buf->buffer, vl, flags);
-}
-
-
-static void
-pb_cache_buffer_fence(struct pb_buffer *_buf,
- struct pipe_fence_handle *fence)
-{
- struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
- pb_fence(buf->buffer, fence);
-}
-
-
static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
struct pb_buffer **base_buf,
pb_cache_buffer_destroy,
pb_cache_buffer_map,
pb_cache_buffer_unmap,
- pb_cache_buffer_validate,
- pb_cache_buffer_fence,
pb_cache_buffer_get_base_buffer
};
* \file
* Debug buffer manager to detect buffer under- and overflows.
*
- * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
*/
}
-static enum pipe_error
-pb_debug_buffer_validate(struct pb_buffer *_buf,
- struct pb_validate *vl,
- unsigned flags)
-{
- struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
-
- pb_debug_buffer_check(buf);
-
- return pb_validate(buf->buffer, vl, flags);
-}
-
-
-static void
-pb_debug_buffer_fence(struct pb_buffer *_buf,
- struct pipe_fence_handle *fence)
-{
- struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
- pb_fence(buf->buffer, fence);
-}
-
-
const struct pb_vtbl
pb_debug_buffer_vtbl = {
pb_debug_buffer_destroy,
pb_debug_buffer_map,
pb_debug_buffer_unmap,
- pb_debug_buffer_validate,
- pb_debug_buffer_fence,
pb_debug_buffer_get_base_buffer
};
* \file
* A buffer manager that wraps buffers in fenced buffers.
*
- * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
*/
* \file
* Buffer manager using the old texture memory manager.
*
- * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
*/
}
-static enum pipe_error
-mm_buffer_validate(struct pb_buffer *buf,
- struct pb_validate *vl,
- unsigned flags)
-{
- struct mm_buffer *mm_buf = mm_buffer(buf);
- struct mm_pb_manager *mm = mm_buf->mgr;
- return pb_validate(mm->buffer, vl, flags);
-}
-
-
-static void
-mm_buffer_fence(struct pb_buffer *buf,
- struct pipe_fence_handle *fence)
-{
- struct mm_buffer *mm_buf = mm_buffer(buf);
- struct mm_pb_manager *mm = mm_buf->mgr;
- pb_fence(mm->buffer, fence);
-}
-
-
static void
mm_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
mm_buffer_destroy,
mm_buffer_map,
mm_buffer_unmap,
- mm_buffer_validate,
- mm_buffer_fence,
mm_buffer_get_base_buffer
};
* \file
* Batch buffer pool management.
*
- * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
* \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
}
-static enum pipe_error
-pool_buffer_validate(struct pb_buffer *buf,
- struct pb_validate *vl,
- unsigned flags)
-{
- struct pool_buffer *pool_buf = pool_buffer(buf);
- struct pool_pb_manager *pool = pool_buf->mgr;
- return pb_validate(pool->buffer, vl, flags);
-}
-
-
-static void
-pool_buffer_fence(struct pb_buffer *buf,
- struct pipe_fence_handle *fence)
-{
- struct pool_buffer *pool_buf = pool_buffer(buf);
- struct pool_pb_manager *pool = pool_buf->mgr;
- pb_fence(pool->buffer, fence);
-}
-
-
static void
pool_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
pool_buffer_destroy,
pool_buffer_map,
pool_buffer_unmap,
- pool_buffer_validate,
- pool_buffer_fence,
pool_buffer_get_base_buffer
};
}
-static enum pipe_error
-pb_slab_buffer_validate(struct pb_buffer *_buf,
- struct pb_validate *vl,
- unsigned flags)
-{
- struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
- return pb_validate(buf->slab->bo, vl, flags);
-}
-
-
-static void
-pb_slab_buffer_fence(struct pb_buffer *_buf,
- struct pipe_fence_handle *fence)
-{
- struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
- pb_fence(buf->slab->bo, fence);
-}
-
-
static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
struct pb_buffer **base_buf,
pb_slab_buffer_destroy,
pb_slab_buffer_map,
pb_slab_buffer_unmap,
- pb_slab_buffer_validate,
- pb_slab_buffer_fence,
pb_slab_buffer_get_base_buffer
};
#define PB_VALIDATE_INITIAL_SIZE 1 /* 512 */
-struct pb_validate_entry
-{
- struct pb_buffer *buf;
- unsigned flags;
-};
-
-
struct pb_validate
{
- struct pb_validate_entry *entries;
+ struct pb_buffer **buffers;
unsigned used;
unsigned size;
};
enum pipe_error
pb_validate_add_buffer(struct pb_validate *vl,
- struct pb_buffer *buf,
- unsigned flags)
+ struct pb_buffer *buf)
{
assert(buf);
if(!buf)
return PIPE_ERROR;
- assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
- assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
- flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
-
/* We only need to store one reference for each buffer, so avoid storing
* consecutive references for the same buffer. It might not be the most
* common pattern, but it is easy to implement.
*/
- if(vl->used && vl->entries[vl->used - 1].buf == buf) {
- vl->entries[vl->used - 1].flags |= flags;
+ if(vl->used && vl->buffers[vl->used - 1] == buf) {
return PIPE_OK;
}
/* Grow the table */
if(vl->used == vl->size) {
unsigned new_size;
- struct pb_validate_entry *new_entries;
+ struct pb_buffer **new_buffers;
new_size = vl->size * 2;
if(!new_size)
return PIPE_ERROR_OUT_OF_MEMORY;
- new_entries = (struct pb_validate_entry *)REALLOC(vl->entries,
- vl->size*sizeof(struct pb_validate_entry),
- new_size*sizeof(struct pb_validate_entry));
- if(!new_entries)
+ new_buffers = (struct pb_buffer **)REALLOC(vl->buffers,
+ vl->size*sizeof(struct pb_buffer *),
+ new_size*sizeof(struct pb_buffer *));
+ if(!new_buffers)
return PIPE_ERROR_OUT_OF_MEMORY;
- memset(new_entries + vl->size, 0, (new_size - vl->size)*sizeof(struct pb_validate_entry));
+ memset(new_buffers + vl->size, 0, (new_size - vl->size)*sizeof(struct pb_buffer *));
vl->size = new_size;
- vl->entries = new_entries;
+ vl->buffers = new_buffers;
}
- assert(!vl->entries[vl->used].buf);
- pb_reference(&vl->entries[vl->used].buf, buf);
- vl->entries[vl->used].flags = flags;
+ assert(!vl->buffers[vl->used]);
+ pb_reference(&vl->buffers[vl->used], buf);
++vl->used;
return PIPE_OK;
}
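A driver accumulates buffers while building a batch; thanks to the check
above, adding the same buffer twice in a row is a cheap no-op. A brief sketch
(vertex_buf is a hypothetical placeholder):

enum pipe_error ret;

ret = pb_validate_add_buffer(vl, vertex_buf);
if (ret != PIPE_OK)
   return ret;

/* A consecutive duplicate is coalesced into the previous entry. */
ret = pb_validate_add_buffer(vl, vertex_buf);
assert(ret == PIPE_OK);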
-enum pipe_error
-pb_validate_foreach(struct pb_validate *vl,
- enum pipe_error (*callback)(struct pb_buffer *buf, void *data),
- void *data)
-{
- unsigned i;
- for(i = 0; i < vl->used; ++i) {
- enum pipe_error ret;
- ret = callback(vl->entries[i].buf, data);
- if(ret != PIPE_OK)
- return ret;
- }
- return PIPE_OK;
-}
-
-
enum pipe_error
pb_validate_validate(struct pb_validate *vl)
{
- unsigned i;
-
- for(i = 0; i < vl->used; ++i) {
- enum pipe_error ret;
- ret = pb_validate(vl->entries[i].buf, vl, vl->entries[i].flags);
- if(ret != PIPE_OK) {
- while(i--)
- pb_validate(vl->entries[i].buf, NULL, 0);
- return ret;
- }
- }
-
+ /* FIXME: go through each buffer, ensure it's not mapped and its address
+ * is available -- requires a new pb_buffer interface */
return PIPE_OK;
}
{
unsigned i;
for(i = 0; i < vl->used; ++i) {
- pb_fence(vl->entries[i].buf, fence);
- pb_reference(&vl->entries[i].buf, NULL);
+ buffer_fence(vl->buffers[i], fence);
+ pb_reference(&vl->buffers[i], NULL);
}
vl->used = 0;
}
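The intended cycle is: validate the list before issuing commands, then fence
it right after, which also releases the list's references and leaves it empty
for the next batch. A hedged sketch, with flush_batch again standing in for
the driver-specific flush:

enum pipe_error ret;
struct pipe_fence_handle *fence;

ret = pb_validate_validate(vl);
if (ret == PIPE_OK) {
   fence = flush_batch(winsys, batch);
   pb_validate_fence(vl, fence);   /* fences and empties the list */
   winsys->fence_reference(winsys, &fence, NULL);
}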
{
unsigned i;
for(i = 0; i < vl->used; ++i)
- pb_reference(&vl->entries[i].buf, NULL);
- FREE(vl->entries);
+ pb_reference(&vl->buffers[i], NULL);
+ FREE(vl->buffers);
FREE(vl);
}
return NULL;
vl->size = PB_VALIDATE_INITIAL_SIZE;
- vl->entries = (struct pb_validate_entry *)CALLOC(vl->size, sizeof(struct pb_buffer *));
- if(!vl->entries) {
+ vl->buffers = (struct pb_buffer **)CALLOC(vl->size, sizeof(struct pb_buffer *));
+ if(!vl->buffers) {
FREE(vl);
return NULL;
}
enum pipe_error
pb_validate_add_buffer(struct pb_validate *vl,
- struct pb_buffer *buf,
- unsigned flags);
-
-enum pipe_error
-pb_validate_foreach(struct pb_validate *vl,
- enum pipe_error (*callback)(struct pb_buffer *buf, void *data),
- void *data);
+ struct pb_buffer *buf);
/**
* Validate all buffers for hardware access.
/**
* Fence all buffers and clear the list.
*
* Should be called right after issuing commands to the hardware.
*/
void
pb_validate_fence(struct pb_validate *vl,
* Implementation of client buffers (also designated as "user buffers"), which
* are just state-tracker-owned data masqueraded as buffers.
*
- * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
*/
}
-static enum pipe_error
-pb_user_buffer_validate(struct pb_buffer *buf,
- struct pb_validate *vl,
- unsigned flags)
-{
- assert(0);
- return PIPE_ERROR;
-}
-
-
-static void
-pb_user_buffer_fence(struct pb_buffer *buf,
- struct pipe_fence_handle *fence)
-{
- assert(0);
-}
-
-
static void
pb_user_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
pb_user_buffer_destroy,
pb_user_buffer_map,
pb_user_buffer_unmap,
- pb_user_buffer_validate,
- pb_user_buffer_fence,
pb_user_buffer_get_base_buffer
};