#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
-#include "util/u_double_list.h"
+#include "util/list.h"
#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
assert(!fenced_buf->fence);
debug_printf("%10p %7u %8u %7s\n",
(void *) fenced_buf,
- fenced_buf->base.base.size,
- p_atomic_read(&fenced_buf->base.base.reference.count),
+ fenced_buf->base.size,
+ p_atomic_read(&fenced_buf->base.reference.count),
fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
curr = next;
next = curr->next;
signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
debug_printf("%10p %7u %8u %7s %10p %s\n",
(void *) fenced_buf,
- fenced_buf->base.base.size,
- p_atomic_read(&fenced_buf->base.base.reference.count),
+ fenced_buf->base.size,
+ p_atomic_read(&fenced_buf->base.reference.count),
"gpu",
(void *) fenced_buf->fence,
signaled == 0 ? "y" : "n");
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
struct fenced_buffer *fenced_buf)
{
- assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
+ assert(!pipe_is_referenced(&fenced_buf->base.reference));
assert(!fenced_buf->fence);
assert(fenced_buf->head.prev);
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
struct fenced_buffer *fenced_buf)
{
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+ assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
assert(fenced_buf->fence);
- p_atomic_inc(&fenced_buf->base.base.reference.count);
+ p_atomic_inc(&fenced_buf->base.reference.count);
LIST_DEL(&fenced_buf->head);
assert(fenced_mgr->num_unfenced);
LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
++fenced_mgr->num_unfenced;
- if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
+ if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
return TRUE;
}
/**
* Wait for the fence to expire, and remove it from the fenced list.
*
- * This function will release and re-aquire the mutex, so any copy of mutable
+ * This function will release and re-acquire the mutex, so any copy of mutable
* state must be discarded after calling it.
*/
static INLINE enum pipe_error
debug_warning("waiting for GPU");
#endif
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+ assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->fence);
if(fenced_buf->fence) {
pipe_mutex_lock(fenced_mgr->mutex);
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+ assert(pipe_is_referenced(&fenced_buf->base.reference));
/*
* Only proceed if the fence object didn't change in the meanwhile.
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
+ assert(!pipe_is_referenced(&fenced_buf->base.reference));
pipe_mutex_lock(fenced_mgr->mutex);
pipe_mutex_lock(fenced_mgr->mutex);
- assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+ assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->buffer);
if(fence != fenced_buf->fence) {
if(!fenced_buf)
goto no_buffer;
- pipe_reference_init(&fenced_buf->base.base.reference, 1);
- fenced_buf->base.base.alignment = desc->alignment;
- fenced_buf->base.base.usage = desc->usage;
- fenced_buf->base.base.size = size;
+ pipe_reference_init(&fenced_buf->base.reference, 1);
+ fenced_buf->base.alignment = desc->alignment;
+ fenced_buf->base.usage = desc->usage;
+ fenced_buf->base.size = size;
fenced_buf->size = size;
fenced_buf->desc = *desc;
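For illustration of the doc comment above (the wait helper releases and re-acquires the mutex, so copies of mutable state go stale): a minimal standalone sketch of that caller pattern, using plain pthreads rather than the gallium os_thread wrappers. All names here (shared_state, wait_for_flag, consumer) are hypothetical and not part of the patched code.

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical shared state guarded by a mutex. */
struct shared_state {
   pthread_mutex_t mutex;
   pthread_cond_t cond;
   bool fence_signalled;
   unsigned num_fenced;   /* mutable state that other threads may change */
};

/*
 * Called with state->mutex held; pthread_cond_wait() drops the mutex while
 * sleeping and re-acquires it before returning, analogous to the fenced
 * buffer wait described in the comment above.
 */
static void
wait_for_flag(struct shared_state *state)
{
   while (!state->fence_signalled)
      pthread_cond_wait(&state->cond, &state->mutex);
}

static void
consumer(struct shared_state *state)
{
   pthread_mutex_lock(&state->mutex);

   unsigned before = state->num_fenced;   /* snapshot of mutable state */

   wait_for_flag(state);

   /*
    * The mutex was released inside wait_for_flag(), so "before" may be
    * stale; re-read the state instead of reusing the old copy.
    */
   unsigned after = state->num_fenced;
   (void) before;
   (void) after;

   pthread_mutex_unlock(&state->mutex);
}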