/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */
#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"
/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)
struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum cpu memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};
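

/*
 * A note on the list walks below (illustrative sketch, not from the
 * original file): every loop over `fenced` or `unfenced` caches the next
 * pointer before running the loop body, because the body may unlink the
 * current buffer from the list.
 *
 * \code
 * struct list_head *curr = fenced_mgr->fenced.next;
 * struct list_head *next = curr->next;
 * while(curr != &fenced_mgr->fenced) {
 *    struct fenced_buffer *fenced_buf =
 *       LIST_ENTRY(struct fenced_buffer, curr, head);
 *    // ...the body may call LIST_DEL(&fenced_buf->head)...
 *    curr = next;
 *    next = curr->next;
 * }
 * \endcode
 */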
/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   struct pb_buffer base;
   struct fenced_manager *mgr;

   /**
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};
static INLINE struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   return (struct fenced_manager *)mgr;
}
static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   return (struct fenced_buffer *)buf;
}
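

/*
 * Illustrative usage of the wrappers above: fenced_buffer() downcasts an
 * abstract pb_buffer to the fenced wrapper, and SUPER() goes back up.
 *
 * \code
 * struct fenced_buffer *fenced_buf = fenced_buffer(buf);
 * struct pb_buffer *same_buf = SUPER(fenced_buf);   // == buf
 * \endcode
 */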
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);
/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %7s %10s %s\n",
                "buffer", "size", "refcount", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %7u\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %7u %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
}
/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   /* TODO: Move the reference count increment here */

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}
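

/*
 * The call sequence expected by fenced_buffer_add_locked(), sketched here
 * for clarity; this is the pattern fenced_buffer_fence() follows further
 * below:
 *
 * \code
 * ops->fence_reference(ops, &fenced_buf->fence, fence);
 * fenced_buf->flags |= fenced_buf->validation_flags;
 * p_atomic_inc(&fenced_buf->base.base.reference.count);
 * fenced_buffer_add_locked(fenced_mgr, fenced_buf);
 * \endcode
 */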
/**
 * Remove the buffer from the fenced list.
 *
 * Reference count should be decremented after calling this function.
 */
static INLINE void
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   /* TODO: Move the reference count decrement and destruction here */
}
/**
 * Wait for the fence to expire, and remove it from the fenced list.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      if(ops->fence_finish(ops, fenced_buf->fence, 0) == 0) {
         /*
          * Remove from the fenced list.
          */
         fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}
/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if(wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further waits.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if(signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      pb_reference((struct pb_buffer **)&fenced_buf, NULL);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}
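

/*
 * The pb_fence_ops return convention relied upon above and in the dump
 * function: fence_signalled() and fence_finish() return zero once the
 * fence has signalled, nonzero otherwise. A hypothetical trivial
 * implementation, for illustration only:
 *
 * \code
 * static int
 * noop_fence_signalled(struct pb_fence_ops *ops,
 *                      struct pipe_fence_handle *fence,
 *                      unsigned flag)
 * {
 *    return 0;   // report every fence as already signalled
 * }
 * \endcode
 */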
/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /*
       * We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if(fenced_buf->buffer &&
         !fenced_buf->mapcount &&
         !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_buf);
         if(ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if(ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }

            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}
/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}
/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if(!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->mgr->cpu_total_size += fenced_buf->size;
   debug_printf("%s: cpu_total_size = %lu\n",
                __FUNCTION__,
                (unsigned long)fenced_buf->mgr->cpu_total_size);

   return PIPE_OK;
}
/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}
/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}
/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if(!fenced_buf->buffer) {
      if(0)
         fenced_manager_dump_locked(fenced_mgr);

      /* Give up. */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);

   FREE(fenced_buf);
}
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /* Serialize writes */
   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) &&
       (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         /* Don't wait for the GPU to finish writing */
         goto done;
      }

      /* Wait for the GPU to finish writing */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}
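

/*
 * Caller-side sketch of the PIPE_BUFFER_USAGE_DONTBLOCK path above
 * (hypothetical usage, for illustration): poll for CPU access without
 * stalling, then fall back to a blocking map.
 *
 * \code
 * void *map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_READ |
 *                         PIPE_BUFFER_USAGE_DONTBLOCK);
 * if(!map) {
 *    // GPU still busy -- do other work, or block:
 *    map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_READ);
 * }
 * \endcode
 */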
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if(fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* Invalidate. */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   /*
    * Create and update GPU storage.
    */
   if(!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if(ret != PIPE_OK)
         goto done;

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if(ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if(fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      }
      else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if(ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}
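

/*
 * Sketch of the validate/fence protocol as a winsys might drive it
 * (submit_batch() is a hypothetical placeholder): every buffer referenced
 * by a command batch is validated into the batch's validation list, the
 * batch is submitted, and the resulting fence is attached to each buffer
 * via fenced_buffer_fence() below.
 *
 * \code
 * enum pipe_error ret;
 * struct pipe_fence_handle *fence;
 *
 * ret = pb_validate(buf, vl, PIPE_BUFFER_USAGE_GPU_READ);
 * if(ret == PIPE_OK) {
 *    fence = submit_batch();   // hypothetical winsys submission
 *    pb_fence(buf, fence);
 * }
 * \endcode
 */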
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if(fenced_buf->fence) {
         fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));
      }
      if(fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         p_atomic_inc(&fenced_buf->base.base.reference.count);
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if(fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};
/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   if(ret != PIPE_OK) {
      /*
       * Don't stall the GPU or waste memory trying to create a buffer that will
       * most likely never fit into the graphics aperture.
       */
      if(size > fenced_mgr->max_buffer_size) {
         goto no_storage;
      }

      if(fenced_mgr->cpu_total_size + size <= fenced_mgr->max_cpu_total_size) {
         /* Use CPU memory to avoid stalling the GPU */
         ret = fenced_buffer_create_cpu_storage_locked(fenced_buf);
      }
      else {
         /* Create GPU storage, waiting for some to be available */
         ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      }

      if(ret != PIPE_OK)
         goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while(fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

   /*assert(!fenced_mgr->num_unfenced);*/

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if(!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}
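

/*
 * Minimal usage sketch (illustrative only; `my_provider` and `my_fence_ops`
 * stand in for the winsys' own buffer manager and fence callbacks):
 *
 * \code
 * struct pb_manager *mgr;
 * struct pb_buffer *buf;
 * struct pb_desc desc;
 *
 * mgr = fenced_bufmgr_create(my_provider, my_fence_ops,
 *                            16 * 1024 * 1024,     // max_buffer_size
 *                            256 * 1024 * 1024);   // max_cpu_total_size
 *
 * memset(&desc, 0, sizeof desc);
 * desc.alignment = 64;
 * desc.usage = PIPE_BUFFER_USAGE_GPU_READ;
 *
 * buf = mgr->create_buffer(mgr, 4096, &desc);
 * // ...map/validate/fence as shown above...
 * pb_reference(&buf, NULL);
 * mgr->destroy(mgr);
 * \endcode
 */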