pb: Make fenced buffers threadsafe.
author     José Fonseca <jfonseca@vmware.com>
           Thu, 19 Nov 2009 19:46:21 +0000 (19:46 +0000)
committer  José Fonseca <jfonseca@vmware.com>
           Thu, 19 Nov 2009 19:46:21 +0000 (19:46 +0000)
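
The core of this change is a second, per-buffer pipe_mutex next to the
existing per-list one, with a fixed lock order: fenced_buffer_list::mutex
first, fenced_buffer::mutex second (see the struct fenced_buffer comments in
the diff below). A minimal sketch of that discipline, assuming only the
pipe_mutex_lock/pipe_mutex_unlock wrappers; the helper functions are
illustrative and not part of the patch:

   /* Taking both locks from scratch: the list-wide lock comes first. */
   static void example_lock_both(struct fenced_buffer_list *fenced_list,
                                 struct fenced_buffer *fenced_buf)
   {
      pipe_mutex_lock(fenced_list->mutex);  /* 1st: list links, counters */
      pipe_mutex_lock(fenced_buf->mutex);   /* 2nd: fence, flags, vl */
      /* ... manipulate both list and buffer state ... */
      pipe_mutex_unlock(fenced_buf->mutex);
      pipe_mutex_unlock(fenced_list->mutex);
   }

   /* Holding only the buffer mutex but needing both: release and reacquire
    * in the documented order, as fenced_buffer_finish_locked() does. Any
    * state read under fenced_buf->mutex before this point must be
    * revalidated afterwards (hence the repeated fenced_buf->fence check in
    * that function). */
   static void example_upgrade_to_both(struct fenced_buffer_list *fenced_list,
                                       struct fenced_buffer *fenced_buf)
   {
      pipe_mutex_unlock(fenced_buf->mutex);
      pipe_mutex_lock(fenced_list->mutex);
      pipe_mutex_lock(fenced_buf->mutex);
      /* ... */
      pipe_mutex_unlock(fenced_list->mutex);
      /* The caller still holds fenced_buf->mutex, as in
       * fenced_buffer_finish_locked(). */
   }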
src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c

index 2ef4293d4d7bfb9529018ed2b62ecbd9b22eb2dd..2f973684f67b28c2d88a9f5af3cbb158fa019b1a 100644 (file)
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2007-2009 VMware, Inc.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -18,7 +18,7 @@
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -80,11 +80,27 @@ struct fenced_buffer_list
  */
 struct fenced_buffer
 {
+   /**
+    * Immutable members.
+    */
+
    struct pb_buffer base;
-   
    struct pb_buffer *buffer;
+   struct fenced_buffer_list *list;
+
+   /**
+    * Protected by fenced_buffer_list::mutex
+    */
+   struct list_head head;
 
-   /* FIXME: protect access with mutex */
+   /**
+    * The following members are mutable and protected by this mutex.
+    * 
+    * You may lock this mutex alone, or lock it with fenced_buffer_list::mutex
+    * held, but in order to prevent deadlocks you must never lock 
+    * fenced_buffer_list::mutex with this mutex held.
+    */
+   pipe_mutex mutex;
 
    /**
     * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
@@ -96,9 +112,6 @@ struct fenced_buffer
    struct pb_validate *vl;
    unsigned validation_flags;
    struct pipe_fence_handle *fence;
-
-   struct list_head head;
-   struct fenced_buffer_list *list;
 };
 
 
@@ -110,15 +123,24 @@ fenced_buffer(struct pb_buffer *buf)
 }
 
 
+/**
+ * Add the buffer to the fenced list.
+ * 
+ * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this 
+ * order, before calling this function.
+ * 
+ * Reference count should be incremented before calling this function.
+ */
 static INLINE void
-_fenced_buffer_add(struct fenced_buffer *fenced_buf)
+fenced_buffer_add_locked(struct fenced_buffer_list *fenced_list, 
+                         struct fenced_buffer *fenced_buf)
 {
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
-
    assert(pipe_is_referenced(&fenced_buf->base.base.reference));
    assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
    assert(fenced_buf->fence);
 
+   /* TODO: Move the reference count increment here */
+   
 #ifdef DEBUG
    LIST_DEL(&fenced_buf->head);
    assert(fenced_list->numUnfenced);
@@ -130,32 +152,16 @@ _fenced_buffer_add(struct fenced_buffer *fenced_buf)
 
 
 /**
- * Actually destroy the buffer.
+ * Remove the buffer from the fenced list.
+ * 
+ * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this 
+ * order, before calling this function.
+ * 
+ * Reference count should be decremented after calling this function.
  */
 static INLINE void
-_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
-{
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
-   
-   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
-   assert(!fenced_buf->fence);
-#ifdef DEBUG
-   assert(fenced_buf->head.prev);
-   assert(fenced_buf->head.next);
-   LIST_DEL(&fenced_buf->head);
-   assert(fenced_list->numUnfenced);
-   --fenced_list->numUnfenced;
-#else
-   (void)fenced_list;
-#endif
-   pb_reference(&fenced_buf->buffer, NULL);
-   FREE(fenced_buf);
-}
-
-
-static INLINE void
-_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
-                      struct fenced_buffer *fenced_buf)
+fenced_buffer_remove_locked(struct fenced_buffer_list *fenced_list,
+                            struct fenced_buffer *fenced_buf)
 {
    struct pb_fence_ops *ops = fenced_list->ops;
 
@@ -177,37 +183,53 @@ _fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
    ++fenced_list->numUnfenced;
 #endif
    
-   /**
-    * FIXME!!!
-    */
-
-   if(!pipe_is_referenced(&fenced_buf->base.base.reference))
-      _fenced_buffer_destroy(fenced_buf);
+   /* TODO: Move the reference count decrement and destruction here */
 }
 
 
+/**
+ * Wait for the fence to expire, and remove the buffer from the fenced list.
+ * 
+ * fenced_buffer::mutex must be held. fenced_buffer_list::mutex must not be 
+ * held -- it will be acquired and released inside this function.
+ */
 static INLINE enum pipe_error
-_fenced_buffer_finish(struct fenced_buffer *fenced_buf)
+fenced_buffer_finish_locked(struct fenced_buffer_list *fenced_list,
+                            struct fenced_buffer *fenced_buf)
 {
-   struct fenced_buffer_list *fenced_list = fenced_buf->list;
    struct pb_fence_ops *ops = fenced_list->ops;
+   enum pipe_error ret = PIPE_ERROR;
 
 #if 0
    debug_warning("waiting for GPU");
 #endif
 
+   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
    assert(fenced_buf->fence);
+
+   /* Acquire the global lock */
+   pipe_mutex_unlock(fenced_buf->mutex);
+   pipe_mutex_lock(fenced_list->mutex);
+   pipe_mutex_lock(fenced_buf->mutex);
+
    if(fenced_buf->fence) {
-      if(ops->fence_finish(ops, fenced_buf->fence, 0) != 0) {
-        return PIPE_ERROR;
+      if(ops->fence_finish(ops, fenced_buf->fence, 0) == 0) {
+         /* Remove from the fenced list */
+         /* TODO: remove subsequent expired buffers as well */
+         fenced_buffer_remove_locked(fenced_list, fenced_buf);
+         
+         p_atomic_dec(&fenced_buf->base.base.reference.count);
+         assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+
+         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+         ret = PIPE_OK;
       }
-      /* Remove from the fenced list */
-      /* TODO: remove consequents */
-      _fenced_buffer_remove(fenced_list, fenced_buf);
    }
 
-   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
-   return PIPE_OK;
+   pipe_mutex_unlock(fenced_list->mutex);
+
+   return ret;
 }
 
 
@@ -215,7 +237,7 @@ _fenced_buffer_finish(struct fenced_buffer *fenced_buf)
  * Free as many fenced buffers from the list head as possible. 
  */
 static void
-_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list, 
+fenced_buffer_list_check_free_locked(struct fenced_buffer_list *fenced_list, 
                                int wait)
 {
    struct pb_fence_ops *ops = fenced_list->ops;
@@ -228,21 +250,28 @@ _fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
    while(curr != &fenced_list->delayed) {
       fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
 
+      pipe_mutex_lock(fenced_buf->mutex);
+
       if(fenced_buf->fence != prev_fence) {
         int signaled;
         if (wait)
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
         else
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
-        if (signaled != 0)
+        if (signaled != 0) {
+            pipe_mutex_unlock(fenced_buf->mutex);
            break;
+         }
         prev_fence = fenced_buf->fence;
       }
       else {
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
       }
 
-      _fenced_buffer_remove(fenced_list, fenced_buf);
+      fenced_buffer_remove_locked(fenced_list, fenced_buf);
+      pipe_mutex_unlock(fenced_buf->mutex);
+
+      pb_reference((struct pb_buffer **)&fenced_buf, NULL);
 
       curr = next; 
       next = curr->next;
@@ -256,30 +285,25 @@ fenced_buffer_destroy(struct pb_buffer *buf)
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);   
    struct fenced_buffer_list *fenced_list = fenced_buf->list;
 
-   pipe_mutex_lock(fenced_list->mutex);
    assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
-   if (fenced_buf->fence) {
-      struct pb_fence_ops *ops = fenced_list->ops;
-      if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
-        struct list_head *curr, *prev;
-        curr = &fenced_buf->head;
-        prev = curr->prev;
-        do {
-           fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
-           assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
-           _fenced_buffer_remove(fenced_list, fenced_buf);
-           curr = prev;
-           prev = curr->prev;
-        } while (curr != &fenced_list->delayed);
-      }          
-      else {
-        /* delay destruction */
-      }
-   }
-   else {
-      _fenced_buffer_destroy(fenced_buf);
-   }
+   assert(!fenced_buf->fence);
+
+#ifdef DEBUG
+   pipe_mutex_lock(fenced_list->mutex);
+   assert(fenced_buf->head.prev);
+   assert(fenced_buf->head.next);
+   LIST_DEL(&fenced_buf->head);
+   assert(fenced_list->numUnfenced);
+   --fenced_list->numUnfenced;
    pipe_mutex_unlock(fenced_list->mutex);
+#else
+   (void)fenced_list;
+#endif
+
+   pb_reference(&fenced_buf->buffer, NULL);
+
+   pipe_mutex_destroy(fenced_buf->mutex);
+   FREE(fenced_buf);
 }
 
 
@@ -290,24 +314,23 @@ fenced_buffer_map(struct pb_buffer *buf,
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_buffer_list *fenced_list = fenced_buf->list;
    struct pb_fence_ops *ops = fenced_list->ops;
-   void *map;
+   void *map = NULL;
+
+   pipe_mutex_lock(fenced_buf->mutex);
 
    assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));
    
    /* Serialize writes */
    if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
       ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
-      if(flags & PIPE_BUFFER_USAGE_DONTBLOCK) {
+      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
+          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
          /* Don't wait for the GPU to finish writing */
-         if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0)
-            _fenced_buffer_remove(fenced_list, fenced_buf);
-         else
-            return NULL;
-      }
-      else {
-         /* Wait for the GPU to finish writing */
-         _fenced_buffer_finish(fenced_buf);
+         goto finish;
       }
+
+      /* Wait for the GPU to finish writing */
+      fenced_buffer_finish_locked(fenced_list, fenced_buf);
    }
 
 #if 0
@@ -324,6 +347,9 @@ fenced_buffer_map(struct pb_buffer *buf,
       fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
    }
 
+finish:
+   pipe_mutex_unlock(fenced_buf->mutex);
+   
    return map;
 }
 
@@ -332,6 +358,9 @@ static void
 fenced_buffer_unmap(struct pb_buffer *buf)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   
+   pipe_mutex_lock(fenced_buf->mutex);
+   
    assert(fenced_buf->mapcount);
    if(fenced_buf->mapcount) {
       pb_unmap(fenced_buf->buffer);
@@ -339,6 +368,8 @@ fenced_buffer_unmap(struct pb_buffer *buf)
       if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
    }
+   
+   pipe_mutex_unlock(fenced_buf->mutex);
 }
 
 
@@ -350,11 +381,14 @@ fenced_buffer_validate(struct pb_buffer *buf,
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    enum pipe_error ret;
    
+   pipe_mutex_lock(fenced_buf->mutex);
+
    if(!vl) {
       /* invalidate */
       fenced_buf->vl = NULL;
       fenced_buf->validation_flags = 0;
-      return PIPE_OK;
+      ret = PIPE_OK;
+      goto finish;
    }
    
    assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
@@ -362,14 +396,17 @@ fenced_buffer_validate(struct pb_buffer *buf,
    flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
 
    /* Buffer cannot be validated in two different lists */ 
-   if(fenced_buf->vl && fenced_buf->vl != vl)
-      return PIPE_ERROR_RETRY;
+   if(fenced_buf->vl && fenced_buf->vl != vl) {
+      ret = PIPE_ERROR_RETRY;
+      goto finish;
+   }
    
 #if 0
    /* Do not validate if buffer is still mapped */
    if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
       /* TODO: wait for the thread that mapped the buffer to unmap it */
-      return PIPE_ERROR_RETRY;
+      ret = PIPE_ERROR_RETRY;
+      goto finish;
    }
    /* Final sanity checking */
    assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
@@ -379,17 +416,21 @@ fenced_buffer_validate(struct pb_buffer *buf,
    if(fenced_buf->vl == vl &&
       (fenced_buf->validation_flags & flags) == flags) {
       /* Nothing to do -- buffer already validated */
-      return PIPE_OK;
+      ret = PIPE_OK;
+      goto finish;
    }
    
    ret = pb_validate(fenced_buf->buffer, vl, flags);
    if (ret != PIPE_OK)
-      return ret;
+      goto finish;
    
    fenced_buf->vl = vl;
    fenced_buf->validation_flags |= flags;
    
-   return PIPE_OK;
+finish:
+   pipe_mutex_unlock(fenced_buf->mutex);
+
+   return ret;
 }
 
 
@@ -404,29 +445,36 @@ fenced_buffer_fence(struct pb_buffer *buf,
    fenced_buf = fenced_buffer(buf);
    fenced_list = fenced_buf->list;
    ops = fenced_list->ops;
-   
-   if(fence == fenced_buf->fence) {
-      /* Nothing to do */
-      return;
-   }
 
-   assert(fenced_buf->vl);
-   assert(fenced_buf->validation_flags);
-   
    pipe_mutex_lock(fenced_list->mutex);
-   if (fenced_buf->fence)
-      _fenced_buffer_remove(fenced_list, fenced_buf);
-   if (fence) {
-      ops->fence_reference(ops, &fenced_buf->fence, fence);
-      fenced_buf->flags |= fenced_buf->validation_flags;
-      _fenced_buffer_add(fenced_buf);
-   }
-   pipe_mutex_unlock(fenced_list->mutex);
+   pipe_mutex_lock(fenced_buf->mutex);
+
+   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+
+   if(fence != fenced_buf->fence) {
+      assert(fenced_buf->vl);
+      assert(fenced_buf->validation_flags);
+      
+      if (fenced_buf->fence) {
+         fenced_buffer_remove_locked(fenced_list, fenced_buf);
+         p_atomic_dec(&fenced_buf->base.base.reference.count);
+         assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+      }
+      if (fence) {
+         ops->fence_reference(ops, &fenced_buf->fence, fence);
+         fenced_buf->flags |= fenced_buf->validation_flags;
+         p_atomic_inc(&fenced_buf->base.base.reference.count);
+         fenced_buffer_add_locked(fenced_list, fenced_buf);
+      }
+
+      pb_fence(fenced_buf->buffer, fence);
    
-   pb_fence(fenced_buf->buffer, fence);
+      fenced_buf->vl = NULL;
+      fenced_buf->validation_flags = 0;
+   }
 
-   fenced_buf->vl = NULL;
-   fenced_buf->validation_flags = 0;
+   pipe_mutex_unlock(fenced_buf->mutex);
+   pipe_mutex_unlock(fenced_list->mutex);
 }
 
 
@@ -436,6 +484,7 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                               pb_size *offset)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+   /* NOTE: accesses immutable members only -- mutex not necessary */
    pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
 }
 
@@ -475,6 +524,8 @@ fenced_buffer_create(struct fenced_buffer_list *fenced_list,
    buf->buffer = buffer;
    buf->list = fenced_list;
    
+   pipe_mutex_init(buf->mutex);
+
 #ifdef DEBUG
    pipe_mutex_lock(fenced_list->mutex);
    LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
@@ -516,7 +567,7 @@ fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                               int wait)
 {
    pipe_mutex_lock(fenced_list->mutex);
-   _fenced_buffer_list_check_free(fenced_list, wait);
+   fenced_buffer_list_check_free_locked(fenced_list, wait);
    pipe_mutex_unlock(fenced_list->mutex);
 }
 
@@ -538,11 +589,13 @@ fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
    next = curr->next;
    while(curr != &fenced_list->unfenced) {
       fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+      pipe_mutex_lock(fenced_buf->mutex);
       assert(!fenced_buf->fence);
       debug_printf("%10p %7u %7u\n",
                    (void *) fenced_buf,
                    fenced_buf->base.base.size,
                    p_atomic_read(&fenced_buf->base.base.reference.count));
+      pipe_mutex_unlock(fenced_buf->mutex);
       curr = next; 
       next = curr->next;
    }
@@ -552,6 +605,7 @@ fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
    while(curr != &fenced_list->delayed) {
       int signaled;
       fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+      pipe_mutex_lock(fenced_buf->mutex);
       signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
       debug_printf("%10p %7u %7u %10p %s\n",
                    (void *) fenced_buf,
@@ -559,6 +613,7 @@ fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
                    p_atomic_read(&fenced_buf->base.base.reference.count),
                    (void *) fenced_buf->fence,
                    signaled == 0 ? "y" : "n");
+      pipe_mutex_unlock(fenced_buf->mutex);
       curr = next; 
       next = curr->next;
    }
@@ -579,8 +634,8 @@ fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
 #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
       sched_yield();
 #endif
-      _fenced_buffer_list_check_free(fenced_list, 1);
       pipe_mutex_lock(fenced_list->mutex);
+      fenced_buffer_list_check_free_locked(fenced_list, 1);
    }
 
 #ifdef DEBUG
@@ -588,6 +643,7 @@ fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
 #endif
       
    pipe_mutex_unlock(fenced_list->mutex);
+   pipe_mutex_destroy(fenced_list->mutex);
    
    fenced_list->ops->destroy(fenced_list->ops);
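
With destruction no longer deferred inside the list (fenced_buffer_destroy()
now asserts that no fence is pending), the list keeps a fenced buffer alive
by holding its own reference for as long as the buffer carries a fence. A
sketch of that lifecycle, with locking and the fence-expiry test elided;
grounded in fenced_buffer_fence() and fenced_buffer_list_check_free_locked()
above, helper name illustrative:

   static void example_fence_lifecycle(struct fenced_buffer_list *fenced_list,
                                       struct fenced_buffer *fenced_buf)
   {
      /* On fencing: the list takes a reference of its own, so the buffer
       * outlives its last user until the GPU is done with it. */
      p_atomic_inc(&fenced_buf->base.base.reference.count);
      fenced_buffer_add_locked(fenced_list, fenced_buf);

      /* On fence expiry: unlink first, then drop the list's reference.
       * If that was the last reference, pb_reference() invokes
       * fenced_buffer_destroy(), which may now assume !fenced_buf->fence. */
      fenced_buffer_remove_locked(fenced_list, fenced_buf);
      pb_reference((struct pb_buffer **)&fenced_buf, NULL);
   }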