/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */
#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"
/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)
struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum cpu memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};
/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};
static INLINE struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);
/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
}
/**
 * Destroy a fenced buffer.
 */
static void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}
/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}
/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static INLINE boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}
/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      pipe_mutex_unlock(fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      pipe_mutex_lock(fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.base.reference));

      /*
       * Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has been already carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if(proceed && finished == 0) {
         /*
          * Remove from the fenced list.
          */
         boolean destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: also remove subsequent buffers with the same fence? */

         assert(!destroyed);

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}
/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pb_buffer *pb_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if(wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further waits.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if(signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}
/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /*
       * We can only move storage if the buffer is not mapped and not
       * currently being validated.
       */
      if(fenced_buf->buffer &&
         !fenced_buf->mapcount &&
         !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if(ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if(ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}
/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}
/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if(!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}
/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}
/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}
/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if(!fenced_buf->buffer) {
      /* Give up. */
      fenced_manager_dump_locked(fenced_mgr);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}
/**
 * Copy the contents of the CPU storage to the GPU storage.
 */
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
/**
 * Copy the contents of the GPU storage to the CPU storage.
 */
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) &&
          (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {

      /*
       * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
       */
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PIPE_BUFFER_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* Invalidate. */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists. */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated. */
      ret = PIPE_OK;
      goto done;
   }

   /*
    * Create and update GPU storage.
    */
   if(!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if(ret != PIPE_OK)
         goto done;

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if(ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if(fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      }
      else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if(ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if(fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};
/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if(size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /*
    * Attempt to use CPU memory to avoid stalling the GPU.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /*
    * Create GPU storage, waiting for some to be available.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences. */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

   /*assert(!fenced_mgr->num_unfenced);*/

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if(!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}