/**************************************************************************
*
- * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2007-2009 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "pipe/p_config.h"
-#if defined(PIPE_OS_LINUX)
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif
#include "pipe/p_compiler.h"
-#include "pipe/p_error.h"
+#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
struct pb_fence_ops *ops;
- size_t numDelayed;
-
+ pb_size numDelayed;
struct list_head delayed;
+
+#ifdef DEBUG
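+   /* Buffers with no outstanding fence, tracked in debug builds only so
+    * that leaked buffers can be spotted and dumped */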
+ pb_size numUnfenced;
+ struct list_head unfenced;
+#endif
};
*/
struct fenced_buffer
{
+ /*
+ * Immutable members.
+ */
+
struct pb_buffer base;
-
struct pb_buffer *buffer;
+ struct fenced_buffer_list *list;
- /* FIXME: protect access with mutex */
+ /**
+ * Protected by fenced_buffer_list::mutex
+ */
+ struct list_head head;
+
+ /**
+ * Following members are mutable and protected by this mutex.
+ *
+    * You may lock this mutex alone, or lock it with fenced_buffer_list::mutex
+    * already held; to prevent deadlocks, however, never acquire
+    * fenced_buffer_list::mutex while this mutex is held.
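+    *
+    * A minimal sketch of the intended pattern (illustrative only; fenced_list
+    * and fenced_buf stand for any list/buffer pair):
+    *
+    *    pipe_mutex_lock(fenced_list->mutex);
+    *    pipe_mutex_lock(fenced_buf->mutex);
+    *    ...inspect or update the mutable members...
+    *    pipe_mutex_unlock(fenced_buf->mutex);
+    *    pipe_mutex_unlock(fenced_list->mutex);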
+ */
+ pipe_mutex mutex;
/**
* A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
struct pb_validate *vl;
unsigned validation_flags;
struct pipe_fence_handle *fence;
-
- struct list_head head;
- struct fenced_buffer_list *list;
};
}
+/**
+ * Add the buffer to the fenced list.
+ *
+ * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in that
+ * order, before calling this function.
+ *
+ * Reference count should be incremented before calling this function.
+ */
static INLINE void
-_fenced_buffer_add(struct fenced_buffer *fenced_buf)
+fenced_buffer_add_locked(struct fenced_buffer_list *fenced_list,
+ struct fenced_buffer *fenced_buf)
{
- struct fenced_buffer_list *fenced_list = fenced_buf->list;
-
- assert(fenced_buf->base.base.refcount);
+ assert(pipe_is_referenced(&fenced_buf->base.base.reference));
assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
assert(fenced_buf->fence);
- assert(!fenced_buf->head.prev);
- assert(!fenced_buf->head.next);
+ /* TODO: Move the reference count increment here */
+
+#ifdef DEBUG
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_list->numUnfenced);
+ --fenced_list->numUnfenced;
+#endif
LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
++fenced_list->numDelayed;
}
/**
- * Actually destroy the buffer.
+ * Remove the buffer from the fenced list.
+ *
+ * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in that
+ * order, before calling this function.
+ *
+ * Reference count should be decremented after calling this function.
*/
static INLINE void
-_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
-{
- assert(!fenced_buf->base.base.refcount);
- assert(!fenced_buf->fence);
- pb_reference(&fenced_buf->buffer, NULL);
- FREE(fenced_buf);
-}
-
-
-static INLINE void
-_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
- struct fenced_buffer *fenced_buf)
+fenced_buffer_remove_locked(struct fenced_buffer_list *fenced_list,
+ struct fenced_buffer *fenced_buf)
{
struct pb_fence_ops *ops = fenced_list->ops;
assert(fenced_buf->head.prev);
assert(fenced_buf->head.next);
- LIST_DEL(&fenced_buf->head);
-#ifdef DEBUG
- fenced_buf->head.prev = NULL;
- fenced_buf->head.next = NULL;
-#endif
+ LIST_DEL(&fenced_buf->head);
assert(fenced_list->numDelayed);
--fenced_list->numDelayed;
- if(!fenced_buf->base.base.refcount)
- _fenced_buffer_destroy(fenced_buf);
+#ifdef DEBUG
+ LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
+ ++fenced_list->numUnfenced;
+#endif
+
+ /* TODO: Move the reference count decrement and destruction here */
}
+/**
+ * Wait for the fence to expire, and remove the buffer from the fenced list.
+ *
+ * fenced_buffer::mutex must be held. fenced_buffer_list::mutex must not be
+ * held -- it will be acquired and released inside this function.
+ */
static INLINE enum pipe_error
-_fenced_buffer_finish(struct fenced_buffer *fenced_buf)
+fenced_buffer_finish_locked(struct fenced_buffer_list *fenced_list,
+ struct fenced_buffer *fenced_buf)
{
- struct fenced_buffer_list *fenced_list = fenced_buf->list;
struct pb_fence_ops *ops = fenced_list->ops;
+ enum pipe_error ret = PIPE_ERROR;
#if 0
debug_warning("waiting for GPU");
#endif
+ assert(pipe_is_referenced(&fenced_buf->base.base.reference));
assert(fenced_buf->fence);
+
+ /* Acquire the global lock */
+ pipe_mutex_unlock(fenced_buf->mutex);
+ pipe_mutex_lock(fenced_list->mutex);
+ pipe_mutex_lock(fenced_buf->mutex);
+
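+   /* The buffer mutex was dropped while the list mutex was acquired above,
+    * so another thread may have cleared the fence in the meantime */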
if(fenced_buf->fence) {
- if(ops->fence_finish(ops, fenced_buf->fence, 0) != 0) {
- return PIPE_ERROR;
+ if(ops->fence_finish(ops, fenced_buf->fence, 0) == 0) {
+ /* Remove from the fenced list */
+         /* TODO: also remove subsequent buffers with the same fence */
+ fenced_buffer_remove_locked(fenced_list, fenced_buf);
+
+ p_atomic_dec(&fenced_buf->base.base.reference.count);
+ assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+
+ fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+ ret = PIPE_OK;
}
- /* Remove from the fenced list */
- /* TODO: remove consequents */
- _fenced_buffer_remove(fenced_list, fenced_buf);
}
- fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
- return PIPE_OK;
+ pipe_mutex_unlock(fenced_list->mutex);
+
+ return ret;
}
* Free as many fenced buffers from the list head as possible.
*/
static void
-_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+fenced_buffer_list_check_free_locked(struct fenced_buffer_list *fenced_list,
-                               int wait)
+                                     int wait)
{
struct pb_fence_ops *ops = fenced_list->ops;
struct list_head *curr, *next;
struct fenced_buffer *fenced_buf;
+ struct pb_buffer *pb_buf;
struct pipe_fence_handle *prev_fence = NULL;
curr = fenced_list->delayed.next;
while(curr != &fenced_list->delayed) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ pipe_mutex_lock(fenced_buf->mutex);
+
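+      /* prev_fence caches the last fence seen to have signalled, so a run
+       * of buffers fenced with that same handle can skip the query */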
if(fenced_buf->fence != prev_fence) {
int signaled;
if (wait)
signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
else
signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
- if (signaled != 0)
+ if (signaled != 0) {
+ pipe_mutex_unlock(fenced_buf->mutex);
break;
+ }
prev_fence = fenced_buf->fence;
}
else {
assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
}
- _fenced_buffer_remove(fenced_list, fenced_buf);
+ fenced_buffer_remove_locked(fenced_list, fenced_buf);
+ pipe_mutex_unlock(fenced_buf->mutex);
+
+ pb_buf = &fenced_buf->base;
+ pb_reference(&pb_buf, NULL);
+
curr = next;
next = curr->next;
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_buffer_list *fenced_list = fenced_buf->list;
+ assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
+ assert(!fenced_buf->fence);
+
+#ifdef DEBUG
pipe_mutex_lock(fenced_list->mutex);
- assert(fenced_buf->base.base.refcount == 0);
- if (fenced_buf->fence) {
- struct pb_fence_ops *ops = fenced_list->ops;
- if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
- struct list_head *curr, *prev;
- curr = &fenced_buf->head;
- prev = curr->prev;
- do {
- fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
- assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
- _fenced_buffer_remove(fenced_list, fenced_buf);
- curr = prev;
- prev = curr->prev;
- } while (curr != &fenced_list->delayed);
- }
- else {
- /* delay destruction */
- }
- }
- else {
- _fenced_buffer_destroy(fenced_buf);
- }
+ assert(fenced_buf->head.prev);
+ assert(fenced_buf->head.next);
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_list->numUnfenced);
+ --fenced_list->numUnfenced;
pipe_mutex_unlock(fenced_list->mutex);
+#else
+ (void)fenced_list;
+#endif
+
+ pb_reference(&fenced_buf->buffer, NULL);
+
+ pipe_mutex_destroy(fenced_buf->mutex);
+ FREE(fenced_buf);
}
unsigned flags)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
- void *map;
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+ struct pb_fence_ops *ops = fenced_list->ops;
+ void *map = NULL;
+
+ pipe_mutex_lock(fenced_buf->mutex);
- assert(flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE);
- assert(!(flags & ~PIPE_BUFFER_USAGE_CPU_READ_WRITE));
- flags &= PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));
- /* Check for GPU read/write access */
- if(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) {
+ /* Serialize writes */
+ if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
+ ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
+      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
+         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
+ /* Don't wait for the GPU to finish writing */
+ goto finish;
+ }
+
/* Wait for the GPU to finish writing */
- _fenced_buffer_finish(fenced_buf);
+ fenced_buffer_finish_locked(fenced_list, fenced_buf);
}
#if 0
map = pb_map(fenced_buf->buffer, flags);
if(map) {
++fenced_buf->mapcount;
- fenced_buf->flags |= flags;
+ fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
}
+finish:
+ pipe_mutex_unlock(fenced_buf->mutex);
+
return map;
}
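+
+/*
+ * Usage sketch (illustrative only -- buf stands for any fenced pb_buffer):
+ * a caller that must not stall can probe with PIPE_BUFFER_USAGE_DONTBLOCK
+ * and fall back to other storage while the GPU still owns the buffer:
+ *
+ *    void *map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE |
+ *                            PIPE_BUFFER_USAGE_DONTBLOCK);
+ *    if(!map) {
+ *       ...GPU write still outstanding -- use another buffer...
+ *    }
+ */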
fenced_buffer_unmap(struct pb_buffer *buf)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+
+ pipe_mutex_lock(fenced_buf->mutex);
+
assert(fenced_buf->mapcount);
if(fenced_buf->mapcount) {
pb_unmap(fenced_buf->buffer);
if(!fenced_buf->mapcount)
fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
}
+
+ pipe_mutex_unlock(fenced_buf->mutex);
}
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
enum pipe_error ret;
+ pipe_mutex_lock(fenced_buf->mutex);
+
if(!vl) {
/* invalidate */
fenced_buf->vl = NULL;
fenced_buf->validation_flags = 0;
- return PIPE_OK;
+ ret = PIPE_OK;
+ goto finish;
}
assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
/* Buffer cannot be validated in two different lists */
- if(fenced_buf->vl && fenced_buf->vl != vl)
- return PIPE_ERROR_RETRY;
+ if(fenced_buf->vl && fenced_buf->vl != vl) {
+ ret = PIPE_ERROR_RETRY;
+ goto finish;
+ }
+#if 0
/* Do not validate if buffer is still mapped */
if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
/* TODO: wait for the thread that mapped the buffer to unmap it */
- return PIPE_ERROR_RETRY;
+ ret = PIPE_ERROR_RETRY;
+ goto finish;
}
+ /* Final sanity checking */
+ assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
+ assert(!fenced_buf->mapcount);
+#endif
if(fenced_buf->vl == vl &&
(fenced_buf->validation_flags & flags) == flags) {
/* Nothing to do -- buffer already validated */
- return PIPE_OK;
+ ret = PIPE_OK;
+ goto finish;
}
-
- /* Final sanity checking */
- assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
- assert(!fenced_buf->mapcount);
ret = pb_validate(fenced_buf->buffer, vl, flags);
if (ret != PIPE_OK)
- return ret;
+ goto finish;
fenced_buf->vl = vl;
fenced_buf->validation_flags |= flags;
- return PIPE_OK;
+finish:
+ pipe_mutex_unlock(fenced_buf->mutex);
+
+ return ret;
}
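+
+/*
+ * The expected call sequence around command submission, as a sketch (vl and
+ * fence are supplied by the winsys; the names are only for illustration):
+ *
+ *    pb_validate(buf, vl, PIPE_BUFFER_USAGE_GPU_WRITE);
+ *    ...build and submit the command batch...
+ *    pb_fence(buf, fence);
+ */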
fenced_buf = fenced_buffer(buf);
fenced_list = fenced_buf->list;
ops = fenced_list->ops;
-
- if(fence == fenced_buf->fence) {
- /* Nothing to do */
- return;
- }
- assert(fenced_buf->vl);
- assert(fenced_buf->validation_flags);
-
pipe_mutex_lock(fenced_list->mutex);
- if (fenced_buf->fence)
- _fenced_buffer_remove(fenced_list, fenced_buf);
- if (fence) {
- ops->fence_reference(ops, &fenced_buf->fence, fence);
- fenced_buf->flags |= fenced_buf->validation_flags;
- _fenced_buffer_add(fenced_buf);
- }
- pipe_mutex_unlock(fenced_list->mutex);
+ pipe_mutex_lock(fenced_buf->mutex);
+
+ assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+
+ if(fence != fenced_buf->fence) {
+ assert(fenced_buf->vl);
+ assert(fenced_buf->validation_flags);
+
+ if (fenced_buf->fence) {
+ fenced_buffer_remove_locked(fenced_list, fenced_buf);
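+         /* Drop the reference the fenced list was holding for this buffer */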
+ p_atomic_dec(&fenced_buf->base.base.reference.count);
+ assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+ }
+ if (fence) {
+ ops->fence_reference(ops, &fenced_buf->fence, fence);
+ fenced_buf->flags |= fenced_buf->validation_flags;
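+         /* Hold a reference on behalf of the fenced list, keeping the
+          * buffer alive until the GPU is known to be done with it */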
+ p_atomic_inc(&fenced_buf->base.base.reference.count);
+ fenced_buffer_add_locked(fenced_list, fenced_buf);
+ }
+
-   pb_fence(fenced_buf->buffer, fence);
-   fenced_buf->vl = NULL;
-   fenced_buf->validation_flags = 0;
+      pb_fence(fenced_buf->buffer, fence);
+      fenced_buf->vl = NULL;
+      fenced_buf->validation_flags = 0;
+   }
+ pipe_mutex_unlock(fenced_buf->mutex);
+ pipe_mutex_unlock(fenced_list->mutex);
}
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
- unsigned *offset)
+ pb_size *offset)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ /* NOTE: accesses immutable members only -- mutex not necessary */
pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}
return NULL;
}
- buf->base.base.refcount = 1;
+ pipe_reference_init(&buf->base.base.reference, 1);
buf->base.base.alignment = buffer->base.alignment;
buf->base.base.usage = buffer->base.usage;
buf->base.base.size = buffer->base.size;
buf->buffer = buffer;
buf->list = fenced_list;
+ pipe_mutex_init(buf->mutex);
+
+#ifdef DEBUG
+ pipe_mutex_lock(fenced_list->mutex);
+ LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
+ ++fenced_list->numUnfenced;
+ pipe_mutex_unlock(fenced_list->mutex);
+#endif
+
return &buf->base;
}
fenced_list->ops = ops;
LIST_INITHEAD(&fenced_list->delayed);
-
fenced_list->numDelayed = 0;
+#ifdef DEBUG
+ LIST_INITHEAD(&fenced_list->unfenced);
+ fenced_list->numUnfenced = 0;
+#endif
+
pipe_mutex_init(fenced_list->mutex);
return fenced_list;
int wait)
{
pipe_mutex_lock(fenced_list->mutex);
- _fenced_buffer_list_check_free(fenced_list, wait);
+ fenced_buffer_list_check_free_locked(fenced_list, wait);
pipe_mutex_unlock(fenced_list->mutex);
}
+#ifdef DEBUG
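+/**
+ * Dump the status of every buffer on both lists, for debugging.
+ *
+ * A plausible (hypothetical) use is calling it from a debugger while
+ * chasing a buffer leak:
+ *
+ *    (gdb) call fenced_buffer_list_dump(fenced_list)
+ */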
+void
+fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
+{
+ struct pb_fence_ops *ops = fenced_list->ops;
+ struct list_head *curr, *next;
+ struct fenced_buffer *fenced_buf;
+
+ pipe_mutex_lock(fenced_list->mutex);
+
+ debug_printf("%10s %7s %7s %10s %s\n",
+ "buffer", "size", "refcount", "fence", "signalled");
+
+ curr = fenced_list->unfenced.next;
+ next = curr->next;
+ while(curr != &fenced_list->unfenced) {
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ pipe_mutex_lock(fenced_buf->mutex);
+ assert(!fenced_buf->fence);
+ debug_printf("%10p %7u %7u\n",
+ (void *) fenced_buf,
+ fenced_buf->base.base.size,
+ p_atomic_read(&fenced_buf->base.base.reference.count));
+ pipe_mutex_unlock(fenced_buf->mutex);
+ curr = next;
+ next = curr->next;
+ }
+
+ curr = fenced_list->delayed.next;
+ next = curr->next;
+ while(curr != &fenced_list->delayed) {
+ int signaled;
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ pipe_mutex_lock(fenced_buf->mutex);
+ signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+ debug_printf("%10p %7u %7u %10p %s\n",
+ (void *) fenced_buf,
+ fenced_buf->base.base.size,
+ p_atomic_read(&fenced_buf->base.base.reference.count),
+ (void *) fenced_buf->fence,
+ signaled == 0 ? "y" : "n");
+ pipe_mutex_unlock(fenced_buf->mutex);
+ curr = next;
+ next = curr->next;
+ }
+
+ pipe_mutex_unlock(fenced_list->mutex);
+}
+#endif
+
+
void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
/* Wait on outstanding fences */
while (fenced_list->numDelayed) {
pipe_mutex_unlock(fenced_list->mutex);
-#if defined(PIPE_OS_LINUX)
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
- _fenced_buffer_list_check_free(fenced_list, 1);
pipe_mutex_lock(fenced_list->mutex);
+ fenced_buffer_list_check_free_locked(fenced_list, 1);
}
+#ifdef DEBUG
+ /*assert(!fenced_list->numUnfenced);*/
+#endif
+
pipe_mutex_unlock(fenced_list->mutex);
+ pipe_mutex_destroy(fenced_list->mutex);
fenced_list->ops->destroy(fenced_list->ops);