/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */
#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include <inttypes.h>  /* for PRIu64, used by fenced_manager_dump_locked() */

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"
/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)
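/* For example, SUPER(fenced_buf) expands to &fenced_buf->base, giving the
 * embedded "superclass" struct of a derived object without a cast.
 */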
struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum cpu memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   mtx_t mutex;

   /**
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};
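/* A fenced_buffer sits on exactly one of the two lists above at any time:
 * `fenced` while a fence is attached, `unfenced` otherwise. The
 * num_fenced/num_unfenced counters mirror the list lengths and are asserted
 * against throughout to catch bookkeeping errors.
 */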
/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   enum pb_usage_flags flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};
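/* Invariant: a live fenced_buffer always has its contents in at least one
 * of `buffer` (GPU storage) or `data` (CPU backing store); both may be
 * populated transiently while storage is migrated between the two.
 */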
static inline struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static inline struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);
/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %"PRIu64" %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %"PRIu64" %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
}
static void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}
/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static inline void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}
/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static inline boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}
/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static inline enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

   debug_warning("waiting for GPU");

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->fence);

   if (fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      mtx_unlock(&fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      mtx_lock(&fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.reference));

      /* Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has already been carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if (proceed && finished == 0) {
         /* Remove from the fenced list. */
         boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);
         (void) destroyed; /* silence unused var warning for non-debug build */

         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}
/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if (fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /* Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further waits.
             */
            wait = FALSE;
         } else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      } else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}
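/* Because the fenced list is ordered from oldest to newest fence, the scan
 * above can stop at the first fence that has not signalled: assuming fences
 * complete in submission order, later entries cannot have expired yet.
 * Buffers sharing the same fence object are checked only once, via the
 * prev_fence short-circuit.
 */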
/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /* We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if (fenced_buf->buffer &&
          !fenced_buf->mapcount &&
          !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if (ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if (ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}
/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if (fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}
/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);
   if (fenced_buf->data)
      return PIPE_OK;

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if (!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}
/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if (fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}
/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static inline boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}
/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /* Check for signaled buffers before trying to allocate. */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /* Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while (!fenced_buf->buffer &&
          (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
           fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if (!fenced_buf->buffer && wait) {
      /* Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while (!fenced_buf->buffer &&
             (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
              fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if (!fenced_buf->buffer) {
      fenced_manager_dump_locked(fenced_mgr);

      /* Give up. */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}
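/* The allocation strategy above escalates in cost:
 * 1. try a direct allocation from the provider;
 * 2. reclaim storage by retiring signalled fences and by swapping idle
 *    buffers out to CPU memory, retrying after every bit of progress;
 * 3. only if `wait` is TRUE, block on outstanding fences before giving up
 *    with PIPE_ERROR_OUT_OF_MEMORY.
 */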
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   if (!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ, NULL);
   if (!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   mtx_lock(&fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   mtx_unlock(&fenced_mgr->mutex);
}
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  enum pb_usage_flags flags, void *flush_ctx)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   mtx_lock(&fenced_mgr->mutex);

   assert(!(flags & PB_USAGE_GPU_READ_WRITE));

   /* Serialize writes. */
   while ((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
          ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
           (flags & PB_USAGE_CPU_WRITE))) {

      /* Don't wait for the GPU to finish accessing it,
       * if blocking is forbidden.
       */
      if ((flags & PB_USAGE_DONTBLOCK) &&
          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PB_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /* Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if (fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags, flush_ctx);
   } else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if (map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
   }

done:
   mtx_unlock(&fenced_mgr->mutex);

   return map;
}
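/* A minimal caller-side sketch (hypothetical: `buf`, `data` and `size` are
 * assumed to be supplied by the caller), trying a non-blocking map before
 * falling back to one that may wait on the buffer's fence:
 *
 *    void *ptr = pb_map(buf, PB_USAGE_CPU_WRITE | PB_USAGE_DONTBLOCK, NULL);
 *    if (!ptr)
 *       ptr = pb_map(buf, PB_USAGE_CPU_WRITE, NULL);  // may wait on the GPU
 *    if (ptr) {
 *       memcpy(ptr, data, size);
 *       pb_unmap(buf);
 *    }
 */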
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   mtx_lock(&fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if (fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if (!fenced_buf->mapcount)
         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
   }

   mtx_unlock(&fenced_mgr->mutex);
}
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       enum pb_usage_flags flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   mtx_lock(&fenced_mgr->mutex);

   if (!vl) {
      /* Invalidate. */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PB_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
   flags &= PB_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists. */
   if (fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if (fenced_buf->vl == vl &&
       (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated. */
      ret = PIPE_OK;
      goto done;
   }

   /* Create and update GPU storage. */
   if (!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if (ret != PIPE_OK) {
         goto done;
      }

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if (ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if (fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      } else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   mtx_unlock(&fenced_mgr->mutex);

   return ret;
}
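/* fenced_buffer_validate() and fenced_buffer_fence() work as a pair:
 * validate() records the validation list and the requested GPU usage flags
 * on the buffer, and the subsequent fence() call consumes them (see the
 * asserts on vl/validation_flags there) when it attaches the fence and
 * moves the buffer onto the fenced list.
 */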
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   mtx_lock(&fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->buffer);

   if (fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         MAYBE_UNUSED boolean destroyed =
            fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }

      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   mtx_unlock(&fenced_mgr->mutex);
}
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   mtx_lock(&fenced_mgr->mutex);

   /* This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if (fenced_buf->buffer) {
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   } else {
      *base_buf = buf;
      *offset = 0;
   }

   mtx_unlock(&fenced_mgr->mutex);
}
static const struct pb_vtbl
fenced_buffer_vtbl = {
   fenced_buffer_destroy,
   fenced_buffer_map,
   fenced_buffer_unmap,
   fenced_buffer_validate,
   fenced_buffer_fence,
   fenced_buffer_get_base_buffer
};
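/* The initializer above is positional, so the entries must stay in the same
 * order as the function pointers declared in struct pb_vtbl (pb_buffer.h).
 */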
/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /* Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if (size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if (!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.reference, 1);
   fenced_buf->base.alignment = desc->alignment;
   fenced_buf->base.usage = desc->usage;
   fenced_buf->base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   mtx_lock(&fenced_mgr->mutex);

   /* Try to create GPU storage without stalling. */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /* Attempt to use CPU memory to avoid stalling the GPU. */
   if (ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /* Create GPU storage, waiting for some to be available. */
   if (ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /* Give up. */
   if (ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   mtx_unlock(&fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   mtx_unlock(&fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   mtx_lock(&fenced_mgr->mutex);
   while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   mtx_unlock(&fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if (fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   mtx_lock(&fenced_mgr->mutex);

   /* Wait on outstanding fences. */
   while (fenced_mgr->num_fenced) {
      mtx_unlock(&fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      mtx_lock(&fenced_mgr->mutex);
      while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

   /* assert(!fenced_mgr->num_unfenced); */

   mtx_unlock(&fenced_mgr->mutex);
   mtx_destroy(&fenced_mgr->mutex);

   if (fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if (!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   (void) mtx_init(&fenced_mgr->mutex, mtx_plain);

   return &fenced_mgr->base;
}
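/* A minimal construction sketch (hypothetical winsys code: `provider` and
 * `ops` are assumed to come from the driver, and the sizes are arbitrary
 * examples):
 *
 *    struct pb_manager *mgr =
 *       fenced_bufmgr_create(provider, ops,
 *                            16 * 1024 * 1024,   // max_buffer_size
 *                            64 * 1024 * 1024);  // max_cpu_total_size
 *    if (mgr) {
 *       // ... create buffers via mgr->create_buffer(mgr, size, &desc) ...
 *       mgr->destroy(mgr);
 *    }
 */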