diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index d6cf6405825..b8b53fd8275 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -40,13 +40,14 @@
 #include <unistd.h>
 #include <sched.h>
 #endif
+#include <inttypes.h>
 
 #include "pipe/p_compiler.h"
 #include "pipe/p_defines.h"
 #include "util/u_debug.h"
 #include "os/os_thread.h"
 #include "util/u_memory.h"
-#include "util/u_double_list.h"
+#include "util/list.h"
 
 #include "pb_buffer.h"
 #include "pb_buffer_fenced.h"
@@ -80,7 +81,7 @@ struct fenced_manager
    /**
     * Following members are mutable and protected by this mutex.
     */
-   pipe_mutex mutex;
+   mtx_t mutex;
 
    /**
     * Fenced buffer list.
@@ -108,14 +109,14 @@
  */
 struct fenced_buffer
 {
-   /*
+   /**
     * Immutable members.
     */
 
    struct pb_buffer base;
    struct fenced_manager *mgr;
 
-   /*
+   /**
    * Following members are mutable and protected by fenced_manager::mutex.
    */
 
@@ -138,7 +139,7 @@ struct fenced_buffer
     * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
     * buffer usage.
     */
-   unsigned flags;
+   enum pb_usage_flags flags;
 
    unsigned mapcount;
 
@@ -149,7 +150,7 @@
 };
 
 
-static INLINE struct fenced_manager *
+static inline struct fenced_manager *
 fenced_manager(struct pb_manager *mgr)
 {
    assert(mgr);
@@ -157,7 +158,7 @@ fenced_manager(struct pb_manager *mgr)
 }
 
 
-static INLINE struct fenced_buffer *
+static inline struct fenced_buffer *
 fenced_buffer(struct pb_buffer *buf)
 {
    assert(buf);
@@ -205,13 +206,13 @@ fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
 
    curr = fenced_mgr->unfenced.next;
    next = curr->next;
-   while(curr != &fenced_mgr->unfenced) {
+   while (curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
-      debug_printf("%10p %7u %8u %7s\n",
+      debug_printf("%10p %"PRIu64" %8u %7s\n",
                   (void *) fenced_buf,
-                  fenced_buf->base.base.size,
-                  p_atomic_read(&fenced_buf->base.base.reference.count),
+                  fenced_buf->base.size,
+                  p_atomic_read(&fenced_buf->base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
@@ -219,15 +220,15 @@ fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
 
    curr = fenced_mgr->fenced.next;
    next = curr->next;
-   while(curr != &fenced_mgr->fenced) {
+   while (curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
-      debug_printf("%10p %7u %8u %7s %10p %s\n",
+      debug_printf("%10p %"PRIu64" %8u %7s %10p %s\n",
                   (void *) fenced_buf,
-                  fenced_buf->base.base.size,
-                  p_atomic_read(&fenced_buf->base.base.reference.count),
+                  fenced_buf->base.size,
+                  p_atomic_read(&fenced_buf->base.reference.count),
                   "gpu", (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
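
Note on the two debug_printf() hunks above: the size printed here (fenced_buf->base.size) is a 64-bit quantity after this series, so the old "%7u" conversion no longer matches its argument. The PRIu64 macro from <inttypes.h> (the header added at the top of the patch) expands to the correct conversion specifier for uint64_t on any target. A minimal standalone illustration:

   #include <inttypes.h>
   #include <stdint.h>
   #include <stdio.h>

   int main(void)
   {
      uint64_t size = 4096;
      /* PRIu64 becomes "llu" or "lu" as the ABI requires, so the same
       * source line is correct on both 32-bit and 64-bit builds. */
      printf("size = %" PRIu64 " bytes\n", size);
      return 0;
   }
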
"y" : "n"); @@ -240,11 +241,11 @@ fenced_manager_dump_locked(struct fenced_manager *fenced_mgr) } -static INLINE void +static inline void fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr, struct fenced_buffer *fenced_buf) { - assert(!pipe_is_referenced(&fenced_buf->base.base.reference)); + assert(!pipe_is_referenced(&fenced_buf->base.reference)); assert(!fenced_buf->fence); assert(fenced_buf->head.prev); @@ -265,20 +266,20 @@ fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr, * * Reference count should be incremented before calling this function. */ -static INLINE void +static inline void fenced_buffer_add_locked(struct fenced_manager *fenced_mgr, struct fenced_buffer *fenced_buf) { - assert(pipe_is_referenced(&fenced_buf->base.base.reference)); + assert(pipe_is_referenced(&fenced_buf->base.reference)); assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE); assert(fenced_buf->fence); - p_atomic_inc(&fenced_buf->base.base.reference.count); + p_atomic_inc(&fenced_buf->base.reference.count); LIST_DEL(&fenced_buf->head); assert(fenced_mgr->num_unfenced); --fenced_mgr->num_unfenced; - LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced); + list_addtail(&fenced_buf->head, &fenced_mgr->fenced); ++fenced_mgr->num_fenced; } @@ -289,7 +290,7 @@ fenced_buffer_add_locked(struct fenced_manager *fenced_mgr, * * Returns TRUE if the buffer was detroyed. */ -static INLINE boolean +static inline boolean fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr, struct fenced_buffer *fenced_buf) { @@ -308,10 +309,10 @@ fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr, assert(fenced_mgr->num_fenced); --fenced_mgr->num_fenced; - LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced); + list_addtail(&fenced_buf->head, &fenced_mgr->unfenced); ++fenced_mgr->num_unfenced; - if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) { + if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) { fenced_buffer_destroy_locked(fenced_mgr, fenced_buf); return TRUE; } @@ -323,10 +324,10 @@ fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr, /** * Wait for the fence to expire, and remove it from the fenced list. * - * This function will release and re-aquire the mutex, so any copy of mutable + * This function will release and re-acquire the mutex, so any copy of mutable * state must be discarded after calling it. */ -static INLINE enum pipe_error +static inline enum pipe_error fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr, struct fenced_buffer *fenced_buf) { @@ -337,26 +338,25 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr, debug_warning("waiting for GPU"); #endif - assert(pipe_is_referenced(&fenced_buf->base.base.reference)); + assert(pipe_is_referenced(&fenced_buf->base.reference)); assert(fenced_buf->fence); - if(fenced_buf->fence) { + if (fenced_buf->fence) { struct pipe_fence_handle *fence = NULL; int finished; boolean proceed; ops->fence_reference(ops, &fence, fenced_buf->fence); - pipe_mutex_unlock(fenced_mgr->mutex); + mtx_unlock(&fenced_mgr->mutex); finished = ops->fence_finish(ops, fenced_buf->fence, 0); - pipe_mutex_lock(fenced_mgr->mutex); + mtx_lock(&fenced_mgr->mutex); - assert(pipe_is_referenced(&fenced_buf->base.base.reference)); + assert(pipe_is_referenced(&fenced_buf->base.reference)); - /* - * Only proceed if the fence object didn't change in the meanwhile. + /* Only proceed if the fence object didn't change in the meanwhile. 
@@ -337,26 +338,25 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
    debug_warning("waiting for GPU");
 #endif
 
-   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+   assert(pipe_is_referenced(&fenced_buf->base.reference));
    assert(fenced_buf->fence);
 
-   if(fenced_buf->fence) {
+   if (fenced_buf->fence) {
       struct pipe_fence_handle *fence = NULL;
       int finished;
       boolean proceed;
 
      ops->fence_reference(ops, &fence, fenced_buf->fence);
 
-      pipe_mutex_unlock(fenced_mgr->mutex);
+      mtx_unlock(&fenced_mgr->mutex);
 
      finished = ops->fence_finish(ops, fenced_buf->fence, 0);
 
-      pipe_mutex_lock(fenced_mgr->mutex);
+      mtx_lock(&fenced_mgr->mutex);
 
-      assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+      assert(pipe_is_referenced(&fenced_buf->base.reference));
 
-      /*
-       * Only proceed if the fence object didn't change in the meanwhile.
+      /* Only proceed if the fence object didn't change in the meanwhile.
        * Otherwise assume the work has been already carried out by another
        * thread that re-aquired the lock before us.
        */
@@ -364,18 +364,14 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
 
      ops->fence_reference(ops, &fence, NULL);
 
-      if(proceed && finished == 0) {
-         /*
-          * Remove from the fenced list
-          */
-
-         boolean destroyed;
-
-         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+      if (proceed && finished == 0) {
+         /* Remove from the fenced list. */
+         boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
 
         /* TODO: remove consequents buffers with the same fence? */
 
         assert(!destroyed);
+         (void) destroyed; /* silence unused var warning for non-debug build */
 
         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;
@@ -404,36 +400,33 @@ fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
 
    curr = fenced_mgr->fenced.next;
    next = curr->next;
-   while(curr != &fenced_mgr->fenced) {
+   while (curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
 
-      if(fenced_buf->fence != prev_fence) {
-	 int signaled;
+      if (fenced_buf->fence != prev_fence) {
+         int signaled;
 
-	 if (wait) {
-	    signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
+         if (wait) {
+            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
 
-	    /*
-	     * Don't return just now. Instead preemptively check if the
-	     * following buffers' fences already expired, without further waits.
-	     */
-	    wait = FALSE;
-	 }
-	 else {
-	    signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
-	 }
+            /* Don't return just now. Instead preemptively check if the
+             * following buffers' fences already expired, without further waits.
+             */
+            wait = FALSE;
+         } else {
+            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+         }
 
-	 if (signaled != 0) {
-	    return ret;
+         if (signaled != 0) {
+            return ret;
          }
 
-	 prev_fence = fenced_buf->fence;
-      }
-      else {
+         prev_fence = fenced_buf->fence;
+      } else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
-	 assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
+         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }
 
      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
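
The fenced_buffer_finish_locked() hunks above are the subtle part of this file: the manager mutex must not be held while fence_finish() blocks on the GPU, so the function takes a reference on the fence, drops the lock, waits, and re-takes the lock. Anything another thread may have mutated in that window has to be re-checked, which is why the code compares the buffer's current fence against the one it waited on, and why its comment insists that callers discard any copies of mutable state. A self-contained sketch of the same shape (generic types and a hypothetical wait callback, not the file's actual code):

   #include <threads.h>

   struct buffer {
      mtx_t *mgr_mutex;   /* manager lock guarding 'fence' */
      void  *fence;       /* currently pending fence, NULL when idle */
   };

   /* Must be entered with *mgr_mutex held; returns with it held.
    * The real code also holds a reference on 'fence' across the wait. */
   static void finish_locked(struct buffer *buf, void (*wait)(void *))
   {
      void *fence = buf->fence;    /* snapshot while still locked */

      mtx_unlock(buf->mgr_mutex);  /* never block on the GPU while locked */
      wait(fence);
      mtx_lock(buf->mgr_mutex);

      /* Another thread may have retired or re-fenced the buffer while
       * the lock was dropped; only act if nothing changed meanwhile. */
      if (buf->fence == fence)
         buf->fence = NULL;        /* retire the buffer */
   }
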
@@ -461,22 +454,21 @@ fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
 
    curr = fenced_mgr->unfenced.next;
    next = curr->next;
-   while(curr != &fenced_mgr->unfenced) {
+   while (curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
 
-      /*
-       * We can only move storage if the buffer is not mapped and not
+      /* We can only move storage if the buffer is not mapped and not
        * validated.
        */
-      if(fenced_buf->buffer &&
+      if (fenced_buf->buffer &&
          !fenced_buf->mapcount &&
          !fenced_buf->vl) {
          enum pipe_error ret;
 
          ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
-         if(ret == PIPE_OK) {
+         if (ret == PIPE_OK) {
             ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
-            if(ret == PIPE_OK) {
+            if (ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
         }
@@ -498,7 +490,7 @@
 static void
 fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
 {
-   if(fenced_buf->data) {
+   if (fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
@@ -515,14 +507,14 @@ fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                         struct fenced_buffer *fenced_buf)
 {
    assert(!fenced_buf->data);
-   if(fenced_buf->data)
+   if (fenced_buf->data)
       return PIPE_OK;
 
    if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
       return PIPE_ERROR_OUT_OF_MEMORY;
 
    fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
-   if(!fenced_buf->data)
+   if (!fenced_buf->data)
       return PIPE_ERROR_OUT_OF_MEMORY;
 
    fenced_mgr->cpu_total_size += fenced_buf->size;
@@ -537,7 +529,7 @@
 static void
 fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
 {
-   if(fenced_buf->buffer) {
+   if (fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
    }
 }
@@ -549,7 +541,7 @@ fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
  * This function is a shorthand around pb_manager::create_buffer for
  * fenced_buffer_create_gpu_storage_locked()'s benefit.
  */
-static INLINE boolean
+static inline boolean
 fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                             struct fenced_buffer *fenced_buf)
 {
@@ -574,41 +566,37 @@ fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
 {
    assert(!fenced_buf->buffer);
 
-   /*
-    * Check for signaled buffers before trying to allocate.
-    */
+   /* Check for signaled buffers before trying to allocate. */
    fenced_manager_check_signalled_locked(fenced_mgr, FALSE);
 
    fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
 
-   /*
-    * Keep trying while there is some sort of progress:
+   /* Keep trying while there is some sort of progress:
     * - fences are expiring,
     * - or buffers are being being swapped out from GPU memory into CPU memory.
     */
-   while(!fenced_buf->buffer &&
+   while (!fenced_buf->buffer &&
          (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
           fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
       fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
    }
 
-   if(!fenced_buf->buffer && wait) {
-      /*
-       * Same as before, but this time around, wait to free buffers if
+   if (!fenced_buf->buffer && wait) {
+      /* Same as before, but this time around, wait to free buffers if
        * necessary.
        */
-      while(!fenced_buf->buffer &&
+      while (!fenced_buf->buffer &&
             (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
              fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
          fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }
 
-   if(!fenced_buf->buffer) {
-      if(0)
+   if (!fenced_buf->buffer) {
+      if (0)
         fenced_manager_dump_locked(fenced_mgr);
 
-      /* give up */
+      /* Give up. */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }
@@ -624,8 +612,8 @@ fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
    assert(fenced_buf->data);
    assert(fenced_buf->buffer);
 
-   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE);
-   if(!map)
+   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
+   if (!map)
       return PIPE_ERROR;
 
    memcpy(map, fenced_buf->data, fenced_buf->size);
@@ -644,8 +632,8 @@ fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
    assert(fenced_buf->data);
    assert(fenced_buf->buffer);
 
-   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ);
-   if(!map)
+   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ, NULL);
+   if (!map)
      return PIPE_ERROR;
 
    memcpy(fenced_buf->data, map, fenced_buf->size);
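
Both storage-copy helpers above gain a third pb_map() argument: the map operation now threads a flush-context pointer through to the underlying buffer manager (presumably so a winsys can flush the command stream that is keeping a buffer busy). These internal CPU/GPU staging copies pass NULL, since the manager has already synchronized on the buffer's fence by the time they run. The call shape, as a fragment (types and functions from pb_buffer.h):

   /* Map for writing; no flush context is needed for an internal copy. */
   uint8_t *map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   if (!map)
      return PIPE_ERROR;
   memcpy(map, fenced_buf->data, fenced_buf->size);
   pb_unmap(fenced_buf->buffer);
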
@@ -662,41 +650,39 @@ fenced_buffer_destroy(struct pb_buffer *buf)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
 
-   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
+   assert(!pipe_is_referenced(&fenced_buf->base.reference));
 
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
 
    fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static void *
 fenced_buffer_map(struct pb_buffer *buf,
-                  unsigned flags)
+                  enum pb_usage_flags flags, void *flush_ctx)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
    struct pb_fence_ops *ops = fenced_mgr->ops;
    void *map = NULL;
 
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
 
    assert(!(flags & PB_USAGE_GPU_READ_WRITE));
 
-   /*
-    * Serialize writes.
-    */
-   while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
+   /* Serialize writes. */
+   while ((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
          ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
           (flags & PB_USAGE_CPU_WRITE))) {
 
-      /*
-       * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
+      /* Don't wait for the GPU to finish accessing it,
+       * if blocking is forbidden.
        */
-      if((flags & PB_USAGE_DONTBLOCK) &&
-         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
+      if ((flags & PB_USAGE_DONTBLOCK) &&
+          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }
@@ -704,28 +690,26 @@ fenced_buffer_map(struct pb_buffer *buf,
         break;
      }
 
-      /*
-       * Wait for the GPU to finish accessing. This will release and re-acquire
+      /* Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
    }
 
-   if(fenced_buf->buffer) {
-      map = pb_map(fenced_buf->buffer, flags);
-   }
-   else {
+   if (fenced_buf->buffer) {
+      map = pb_map(fenced_buf->buffer, flags, flush_ctx);
+   } else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
    }
 
-   if(map) {
+   if (map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
    }
 
-done:
-   pipe_mutex_unlock(fenced_mgr->mutex);
+ done:
+   mtx_unlock(&fenced_mgr->mutex);
 
    return map;
 }
@@ -737,34 +721,34 @@ fenced_buffer_unmap(struct pb_buffer *buf)
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
 
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
 
    assert(fenced_buf->mapcount);
-   if(fenced_buf->mapcount) {
+   if (fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
-      if(!fenced_buf->mapcount)
-	 fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
+      if (!fenced_buf->mapcount)
+         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
    }
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static enum pipe_error
 fenced_buffer_validate(struct pb_buffer *buf,
                        struct pb_validate *vl,
-                       unsigned flags)
+                       enum pb_usage_flags flags)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
    enum pipe_error ret;
 
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
 
-   if(!vl) {
-      /* invalidate */
+   if (!vl) {
+      /* Invalidate. */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
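
fenced_buffer_map() and fenced_buffer_validate() now take enum pb_usage_flags rather than a bare unsigned. C does not stop a caller from passing arbitrary integers, but the typed parameter documents which namespace of bits is meant and lets some compilers warn on mismatches. A reduced standalone illustration (the flag values are stand-ins; the real ones live in pb_buffer.h):

   #include <stdio.h>

   enum pb_usage_flags {
      PB_USAGE_CPU_READ  = (1 << 0),
      PB_USAGE_CPU_WRITE = (1 << 1),
      PB_USAGE_GPU_READ  = (1 << 2),
      PB_USAGE_GPU_WRITE = (1 << 3)
   };

   static void describe(enum pb_usage_flags flags)
   {
      /* An enum parameter still accepts any OR of its bits in C. */
      if (flags & (PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE))
         printf("CPU access requested\n");
      if (flags & (PB_USAGE_GPU_READ | PB_USAGE_GPU_WRITE))
         printf("GPU access requested\n");
   }

   int main(void)
   {
      describe(PB_USAGE_CPU_READ | PB_USAGE_GPU_WRITE);
      return 0;
   }
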
@@ -775,40 +759,37 @@ fenced_buffer_validate(struct pb_buffer *buf,
    assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
    flags &= PB_USAGE_GPU_READ_WRITE;
 
-   /* Buffer cannot be validated in two different lists */
-   if(fenced_buf->vl && fenced_buf->vl != vl) {
+   /* Buffer cannot be validated in two different lists. */
+   if (fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
    }
 
-   if(fenced_buf->vl == vl &&
+   if (fenced_buf->vl == vl &&
       (fenced_buf->validation_flags & flags) == flags) {
-      /* Nothing to do -- buffer already validated */
+      /* Nothing to do -- buffer already validated. */
      ret = PIPE_OK;
      goto done;
    }
 
-   /*
-    * Create and update GPU storage.
-    */
-   if(!fenced_buf->buffer) {
+   /* Create and update GPU storage. */
+   if (!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);
 
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
-      if(ret != PIPE_OK) {
+      if (ret != PIPE_OK) {
         goto done;
      }
 
      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
-      if(ret != PIPE_OK) {
+      if (ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }
 
-      if(fenced_buf->mapcount) {
+      if (fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
-      }
-      else {
+      } else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }
@@ -820,8 +801,8 @@ fenced_buffer_validate(struct pb_buffer *buf,
    fenced_buf->vl = vl;
    fenced_buf->validation_flags |= flags;
 
-done:
-   pipe_mutex_unlock(fenced_mgr->mutex);
+ done:
+   mtx_unlock(&fenced_mgr->mutex);
 
    return ret;
 }
@@ -835,18 +816,17 @@ fenced_buffer_fence(struct pb_buffer *buf,
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
    struct pb_fence_ops *ops = fenced_mgr->ops;
 
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
 
-   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+   assert(pipe_is_referenced(&fenced_buf->base.reference));
    assert(fenced_buf->buffer);
 
-   if(fence != fenced_buf->fence) {
+   if (fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);
 
      if (fenced_buf->fence) {
-         boolean destroyed;
-         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+         ASSERTED boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
@@ -861,7 +841,7 @@ fenced_buffer_fence(struct pb_buffer *buf,
      fenced_buf->validation_flags = 0;
    }
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
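
The fenced_buffer_fence() hunk above collapses a declare-then-assign pair into one initialization annotated with ASSERTED, while the earlier fenced_buffer_finish_locked() hunk silences the same warning with an explicit (void) cast. Both address the same problem: once assert() compiles away under NDEBUG, the variable would be set but never read. A standalone sketch, assuming ASSERTED is mesa's usual may-be-unused annotation from util/macros.h:

   #include <assert.h>

   /* Stand-in for mesa's ASSERTED; the real macro lives in util/macros.h
    * and expands to an unused-variable attribute when asserts are off. */
   #ifdef NDEBUG
   #define ASSERTED __attribute__((unused))
   #else
   #define ASSERTED
   #endif

   static int counter;
   static int do_work(void) { return ++counter; }

   void silence_examples(void)
   {
      ASSERTED int ok = do_work();   /* only consumed by assert() */
      assert(ok > 0);

      int also_ok = do_work();
      assert(also_ok > 0);
      (void) also_ok;                /* portable alternative */
   }
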
@@ -873,34 +853,33 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
 
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
 
-   /*
-    * This should only be called when the buffer is validated. Typically
+   /* This should only be called when the buffer is validated. Typically
     * when processing relocations.
     */
    assert(fenced_buf->vl);
    assert(fenced_buf->buffer);
 
-   if(fenced_buf->buffer)
+   if (fenced_buf->buffer) {
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
-   else {
+   } else {
      *base_buf = buf;
      *offset = 0;
    }
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static const struct pb_vtbl
 fenced_buffer_vtbl = {
-      fenced_buffer_destroy,
-      fenced_buffer_map,
-      fenced_buffer_unmap,
-      fenced_buffer_validate,
-      fenced_buffer_fence,
-      fenced_buffer_get_base_buffer
+   fenced_buffer_destroy,
+   fenced_buffer_map,
+   fenced_buffer_unmap,
+   fenced_buffer_validate,
+   fenced_buffer_fence,
+   fenced_buffer_get_base_buffer
 };
@@ -916,69 +895,60 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
    struct fenced_buffer *fenced_buf;
    enum pipe_error ret;
 
-   /*
-    * Don't stall the GPU, waste time evicting buffers, or waste memory
+   /* Don't stall the GPU, waste time evicting buffers, or waste memory
     * trying to create a buffer that will most likely never fit into the
     * graphics aperture.
     */
-   if(size > fenced_mgr->max_buffer_size) {
+   if (size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
    }
 
    fenced_buf = CALLOC_STRUCT(fenced_buffer);
-   if(!fenced_buf)
+   if (!fenced_buf)
      goto no_buffer;
 
-   pipe_reference_init(&fenced_buf->base.base.reference, 1);
-   fenced_buf->base.base.alignment = desc->alignment;
-   fenced_buf->base.base.usage = desc->usage;
-   fenced_buf->base.base.size = size;
+   pipe_reference_init(&fenced_buf->base.reference, 1);
+   fenced_buf->base.alignment = desc->alignment;
+   fenced_buf->base.usage = desc->usage;
+   fenced_buf->base.size = size;
    fenced_buf->size = size;
    fenced_buf->desc = *desc;
 
    fenced_buf->base.vtbl = &fenced_buffer_vtbl;
    fenced_buf->mgr = fenced_mgr;
 
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
 
-   /*
-    * Try to create GPU storage without stalling,
-    */
+   /* Try to create GPU storage without stalling. */
    ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
 
-   /*
-    * Attempt to use CPU memory to avoid stalling the GPU.
-    */
-   if(ret != PIPE_OK) {
+   /* Attempt to use CPU memory to avoid stalling the GPU. */
+   if (ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
    }
 
-   /*
-    * Create GPU storage, waiting for some to be available.
-    */
-   if(ret != PIPE_OK) {
+   /* Create GPU storage, waiting for some to be available. */
+   if (ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }
 
-   /*
-    * Give up.
-    */
-   if(ret != PIPE_OK) {
+   /* Give up. */
+   if (ret != PIPE_OK) {
      goto no_storage;
   }
 
   assert(fenced_buf->buffer || fenced_buf->data);
 
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 
   return &fenced_buf->base;
 
-no_storage:
-   pipe_mutex_unlock(fenced_mgr->mutex);
+ no_storage:
+   mtx_unlock(&fenced_mgr->mutex);
   FREE(fenced_buf);
-no_buffer:
+ no_buffer:
   return NULL;
 }
@@ -988,13 +958,13 @@ fenced_bufmgr_flush(struct pb_manager *mgr)
 {
    struct fenced_manager *fenced_mgr = fenced_manager(mgr);
 
-   pipe_mutex_lock(fenced_mgr->mutex);
-   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
+   mtx_lock(&fenced_mgr->mutex);
+   while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 
    assert(fenced_mgr->provider->flush);
-   if(fenced_mgr->provider->flush)
+   if (fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
 }
@@ -1004,27 +974,27 @@ fenced_bufmgr_destroy(struct pb_manager *mgr)
 {
    struct fenced_manager *fenced_mgr = fenced_manager(mgr);
 
-   pipe_mutex_lock(fenced_mgr->mutex);
+   mtx_lock(&fenced_mgr->mutex);
 
-   /* Wait on outstanding fences */
+   /* Wait on outstanding fences. */
    while (fenced_mgr->num_fenced) {
-      pipe_mutex_unlock(fenced_mgr->mutex);
+      mtx_unlock(&fenced_mgr->mutex);
 #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
 #endif
-      pipe_mutex_lock(fenced_mgr->mutex);
-      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
+      mtx_lock(&fenced_mgr->mutex);
+      while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
    }
 
 #ifdef DEBUG
-   /*assert(!fenced_mgr->num_unfenced);*/
+   /* assert(!fenced_mgr->num_unfenced); */
 #endif
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
-   pipe_mutex_destroy(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
+   mtx_destroy(&fenced_mgr->mutex);
 
-   if(fenced_mgr->provider)
+   if (fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);
 
    fenced_mgr->ops->destroy(fenced_mgr->ops);
@@ -1041,7 +1011,7 @@ fenced_bufmgr_create(struct pb_manager *provider,
 {
    struct fenced_manager *fenced_mgr;
 
-   if(!provider)
+   if (!provider)
      return NULL;
 
    fenced_mgr = CALLOC_STRUCT(fenced_manager);
@@ -1057,13 +1027,13 @@ fenced_bufmgr_create(struct pb_manager *provider,
    fenced_mgr->max_buffer_size = max_buffer_size;
    fenced_mgr->max_cpu_total_size = max_cpu_total_size;
 
-   LIST_INITHEAD(&fenced_mgr->fenced);
+   list_inithead(&fenced_mgr->fenced);
    fenced_mgr->num_fenced = 0;
 
-   LIST_INITHEAD(&fenced_mgr->unfenced);
+   list_inithead(&fenced_mgr->unfenced);
    fenced_mgr->num_unfenced = 0;
 
-   pipe_mutex_init(fenced_mgr->mutex);
+   (void) mtx_init(&fenced_mgr->mutex, mtx_plain);
 
    return &fenced_mgr->base;
 }
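
A closing note on the list hunks: the upper-case LIST_INITHEAD/LIST_ADDTAIL macros from util/u_double_list.h become the lower-case inline functions of util/list.h with unchanged semantics: an intrusive, circular, doubly linked list whose sentinel heads live in struct fenced_manager and whose links (the head member) are embedded in each fenced_buffer. A compact re-implementation of the pattern, simplified for illustration (not mesa's actual header):

   #include <stddef.h>
   #include <stdio.h>

   struct list_head { struct list_head *prev, *next; };

   static void list_inithead(struct list_head *head)
   {
      head->prev = head;
      head->next = head;
   }

   static void list_addtail(struct list_head *item, struct list_head *head)
   {
      item->next = head;
      item->prev = head->prev;
      head->prev->next = item;
      head->prev = item;
   }

   /* Recover the containing struct from an embedded link, as the
    * LIST_ENTRY() uses in this file do. */
   #define LIST_ENTRY(type, ptr, member) \
      ((type *) ((char *) (ptr) - offsetof(type, member)))

   struct fenced_buffer_ex { int id; struct list_head head; };

   int main(void)
   {
      struct list_head unfenced;
      struct fenced_buffer_ex a = { 1, { NULL, NULL } };
      struct fenced_buffer_ex b = { 2, { NULL, NULL } };

      list_inithead(&unfenced);
      list_addtail(&a.head, &unfenced);
      list_addtail(&b.head, &unfenced);

      /* Walk the list the way fenced_manager_dump_locked() does. */
      for (struct list_head *curr = unfenced.next; curr != &unfenced;
           curr = curr->next) {
         struct fenced_buffer_ex *buf =
            LIST_ENTRY(struct fenced_buffer_ex, curr, head);
         printf("buffer %d\n", buf->id);
      }
      return 0;
   }
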