Add #ifdefs needed to compile Gallium on Solaris with gcc or Sun cc
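
The portability fix itself is mechanical: every spot that was guarded with PIPE_OS_LINUX alone now also accepts PIPE_OS_BSD and PIPE_OS_SOLARIS, so <unistd.h>, <sched.h> and sched_yield() are only referenced on platforms known to provide them. A minimal sketch of the pattern (the no-op fallback branch and the YIELD name are illustrative only, not part of this patch):

    #include "pipe/p_config.h"

    #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
    #include <sched.h>
    #define YIELD() sched_yield()   /* POSIX platforms: give up the CPU */
    #else
    #define YIELD() ((void)0)       /* no sched_yield() available */
    #endif
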
[mesa.git] / src / gallium / auxiliary / pipebuffer / pb_buffer_fenced.c
index f9e62264368e7b83625bb096462f14b678c06564..2cd0b8a8cdf3d56529db2b1072eac610bb5f8c26 100644 (file)
 
 #include "pipe/p_config.h"
 
-#if defined(PIPE_OS_LINUX)
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
 #include <unistd.h>
 #include <sched.h>
 #endif
 
 #include "pipe/p_compiler.h"
 #include "pipe/p_error.h"
-#include "pipe/p_debug.h"
+#include "util/u_debug.h"
 #include "pipe/p_thread.h"
 #include "util/u_memory.h"
 #include "util/u_double_list.h"
@@ -66,8 +66,12 @@ struct fenced_buffer_list
    struct pb_fence_ops *ops;
    
    size_t numDelayed;
-   
    struct list_head delayed;
+   
+#ifdef DEBUG
+   size_t numUnfenced;
+   struct list_head unfenced;
+#endif
 };
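
With the DEBUG-only unfenced list and counter added above, every live fenced_buffer sits on exactly one of the two lists (unfenced until a fence is attached, delayed afterwards), which is what makes the fenced_buffer_list_dump() routine further down possible. A sketch of the invariant this bookkeeping is meant to uphold (hypothetical helper, not part of this change):

    #ifdef DEBUG
    /* Walk both lists and check that the counters match their lengths. */
    static INLINE void
    fenced_buffer_list_check(struct fenced_buffer_list *fenced_list)
    {
       struct list_head *curr;
       size_t n;

       n = 0;
       for(curr = fenced_list->unfenced.next; curr != &fenced_list->unfenced; curr = curr->next)
          ++n;
       assert(n == fenced_list->numUnfenced);

       n = 0;
       for(curr = fenced_list->delayed.next; curr != &fenced_list->delayed; curr = curr->next)
          ++n;
       assert(n == fenced_list->numDelayed);
    }
    #endif
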
 
 
@@ -111,12 +115,15 @@ _fenced_buffer_add(struct fenced_buffer *fenced_buf)
 {
    struct fenced_buffer_list *fenced_list = fenced_buf->list;
 
-   assert(fenced_buf->base.base.refcount);
+   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
    assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
    assert(fenced_buf->fence);
 
-   assert(!fenced_buf->head.prev);
-   assert(!fenced_buf->head.next);
+#ifdef DEBUG
+   LIST_DEL(&fenced_buf->head);
+   assert(fenced_list->numUnfenced);
+   --fenced_list->numUnfenced;
+#endif
    LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
    ++fenced_list->numDelayed;
 }
@@ -128,8 +135,19 @@ _fenced_buffer_add(struct fenced_buffer *fenced_buf)
 static INLINE void
 _fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
 {
-   assert(!fenced_buf->base.base.refcount);
+   struct fenced_buffer_list *fenced_list = fenced_buf->list;
+   
+   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
    assert(!fenced_buf->fence);
+#ifdef DEBUG
+   assert(fenced_buf->head.prev);
+   assert(fenced_buf->head.next);
+   LIST_DEL(&fenced_buf->head);
+   assert(fenced_list->numUnfenced);
+   --fenced_list->numUnfenced;
+#else
+   (void)fenced_list;
+#endif
    pb_reference(&fenced_buf->buffer, NULL);
    FREE(fenced_buf);
 }
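
The (void)fenced_list; in the #else branch above is the usual idiom for keeping release builds warning-clean with gcc and Sun cc when a local is only read under #ifdef DEBUG; in isolation (hypothetical function name, for illustration only):

    static INLINE void
    example_release_clean(struct fenced_buffer *fenced_buf)
    {
       struct fenced_buffer_list *fenced_list = fenced_buf->list;
    #ifdef DEBUG
       assert(fenced_list);   /* only DEBUG builds actually read it */
    #else
       (void)fenced_list;     /* suppress set-but-unused warnings   */
    #endif
    }
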
@@ -149,16 +167,21 @@ _fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
    
    assert(fenced_buf->head.prev);
    assert(fenced_buf->head.next);
-   LIST_DEL(&fenced_buf->head);
-#ifdef DEBUG
-   fenced_buf->head.prev = NULL;
-   fenced_buf->head.next = NULL;
-#endif
    
+   LIST_DEL(&fenced_buf->head);
    assert(fenced_list->numDelayed);
    --fenced_list->numDelayed;
    
-   if(!fenced_buf->base.base.refcount)
+#ifdef DEBUG
+   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
+   ++fenced_list->numUnfenced;
+#endif
+   
+   /**
+    * FIXME!!!
+    */
+
+   if(!pipe_is_referenced(&fenced_buf->base.base.reference))
       _fenced_buffer_destroy(fenced_buf);
 }
 
@@ -234,7 +257,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
    struct fenced_buffer_list *fenced_list = fenced_buf->list;
 
    pipe_mutex_lock(fenced_list->mutex);
-   assert(fenced_buf->base.base.refcount == 0);
+   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
    if (fenced_buf->fence) {
       struct pb_fence_ops *ops = fenced_list->ops;
       if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
@@ -265,16 +288,26 @@ fenced_buffer_map(struct pb_buffer *buf,
                   unsigned flags)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   struct fenced_buffer_list *fenced_list = fenced_buf->list;
+   struct pb_fence_ops *ops = fenced_list->ops;
    void *map;
 
-   assert(flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE);
-   assert(!(flags & ~PIPE_BUFFER_USAGE_CPU_READ_WRITE));
-   flags &= PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));
    
-   /* Check for GPU read/write access */
-   if(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) {
-      /* Wait for the GPU to finish writing */
-      _fenced_buffer_finish(fenced_buf);
+   /* Serialize writes */
+   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
+      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
+      if(flags & PIPE_BUFFER_USAGE_DONTBLOCK) {
+         /* Don't wait for the GPU to finish writing */
+         if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0)
+            _fenced_buffer_remove(fenced_list, fenced_buf);
+         else
+            return NULL;
+      }
+      else {
+         /* Wait for the GPU to finish writing */
+         _fenced_buffer_finish(fenced_buf);
+      }
    }
 
 #if 0
@@ -288,7 +321,7 @@ fenced_buffer_map(struct pb_buffer *buf,
    map = pb_map(fenced_buf->buffer, flags);
    if(map) {
       ++fenced_buf->mapcount;
-      fenced_buf->flags |= flags;
+      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
    }
 
    return map;
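
The DONTBLOCK path above turns a map of a GPU-busy buffer into a NULL return instead of a stall, so callers can probe before committing to a wait. A hypothetical caller sketch (pb_map() and the PIPE_BUFFER_USAGE_* flags are the ones already used in this file):

    /* Hypothetical caller: probe with a non-blocking map first and fall
     * back to a blocking map if the GPU is still using the buffer. */
    static void *
    map_for_write(struct pb_buffer *buf)
    {
       void *ptr = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE |
                               PIPE_BUFFER_USAGE_DONTBLOCK);
       if(!ptr)
          ptr = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
       return ptr;
    }

The caller is still expected to pb_unmap() once it is done writing.
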
@@ -432,7 +465,7 @@ fenced_buffer_create(struct fenced_buffer_list *fenced_list,
       return NULL;
    }
    
-   buf->base.base.refcount = 1;
+   pipe_reference_init(&buf->base.base.reference, 1);
    buf->base.base.alignment = buffer->base.alignment;
    buf->base.base.usage = buffer->base.usage;
    buf->base.base.size = buffer->base.size;
@@ -441,6 +474,13 @@ fenced_buffer_create(struct fenced_buffer_list *fenced_list,
    buf->buffer = buffer;
    buf->list = fenced_list;
    
+#ifdef DEBUG
+   pipe_mutex_lock(fenced_list->mutex);
+   LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
+   ++fenced_list->numUnfenced;
+   pipe_mutex_unlock(fenced_list->mutex);
+#endif
+
    return &buf->base;
 }
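
Alongside the list bookkeeping, the file switches from the old integer base.base.refcount to the pipe_reference helpers; only the two helpers that appear in these hunks are shown in this sketch (hypothetical wrapper name):

    static INLINE void
    fenced_buffer_init_reference(struct fenced_buffer *fenced_buf)
    {
       /* pipe_reference_init() replaces "refcount = 1" ... */
       pipe_reference_init(&fenced_buf->base.base.reference, 1);
       /* ... and pipe_is_referenced() replaces "refcount != 0" checks. */
       assert(pipe_is_referenced(&fenced_buf->base.base.reference));
    }
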
 
@@ -457,9 +497,13 @@ fenced_buffer_list_create(struct pb_fence_ops *ops)
    fenced_list->ops = ops;
 
    LIST_INITHEAD(&fenced_list->delayed);
-
    fenced_list->numDelayed = 0;
    
+#ifdef DEBUG
+   LIST_INITHEAD(&fenced_list->unfenced);
+   fenced_list->numUnfenced = 0;
+#endif
+
    pipe_mutex_init(fenced_list->mutex);
 
    return fenced_list;
@@ -476,6 +520,51 @@ fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
 }
 
 
+#ifdef DEBUG
+void
+fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
+{
+   struct pb_fence_ops *ops = fenced_list->ops;
+   struct list_head *curr, *next;
+   struct fenced_buffer *fenced_buf;
+
+   pipe_mutex_lock(fenced_list->mutex);
+
+   debug_printf("%10s %7s %10s %s\n",
+                "buffer", "reference.count", "fence", "signalled");
+   
+   curr = fenced_list->unfenced.next;
+   next = curr->next;
+   while(curr != &fenced_list->unfenced) {
+      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+      assert(!fenced_buf->fence);
+      debug_printf("%10p %7u\n",
+                   fenced_buf,
+                   fenced_buf->base.base.reference.count);
+      curr = next; 
+      next = curr->next;
+   }
+   
+   curr = fenced_list->delayed.next;
+   next = curr->next;
+   while(curr != &fenced_list->delayed) {
+      int signaled;
+      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+      debug_printf("%10p %7u %10p %s\n",
+                   fenced_buf,
+                   fenced_buf->base.base.reference.count,
+                   fenced_buf->fence,
+                   signaled == 0 ? "y" : "n");
+      curr = next; 
+      next = curr->next;
+   }
+   
+   pipe_mutex_unlock(fenced_list->mutex);
+}
+#endif
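
fenced_buffer_list_dump() only exists in DEBUG builds, so any call site has to be guarded the same way as the definition; a hypothetical use, assuming the prototype is exported from pb_buffer_fenced.h:

    #ifdef DEBUG
       /* e.g. when a fence never signals or a winsys runs out of memory */
       debug_printf("%s: dumping outstanding fenced buffers\n", __FUNCTION__);
       fenced_buffer_list_dump(fenced_list);
    #endif
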
+
+
 void
 fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
 {
@@ -484,13 +573,17 @@ fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
    /* Wait on outstanding fences */
    while (fenced_list->numDelayed) {
       pipe_mutex_unlock(fenced_list->mutex);
-#if defined(PIPE_OS_LINUX)
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
       sched_yield();
 #endif
       _fenced_buffer_list_check_free(fenced_list, 1);
       pipe_mutex_lock(fenced_list->mutex);
    }
 
+#ifdef DEBUG
+   //assert(!fenced_list->numUnfenced);
+#endif
+      
    pipe_mutex_unlock(fenced_list->mutex);
    
    fenced_list->ops->destroy(fenced_list->ops);