1 /**************************************************************************
2 *
3 * Copyright 2007-2010 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * \file
30 * Implementation of fenced buffers.
31 *
32 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
33 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
34 */
35
36
37 #include "pipe/p_config.h"
38
39 #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
40 #include <unistd.h>
41 #include <sched.h>
42 #endif
43
44 #include "pipe/p_compiler.h"
45 #include "pipe/p_defines.h"
46 #include "util/u_debug.h"
47 #include "os/os_thread.h"
48 #include "util/u_memory.h"
49 #include "util/list.h"
50
51 #include "pb_buffer.h"
52 #include "pb_buffer_fenced.h"
53 #include "pb_bufmgr.h"
54
55
56
57 /**
58 * Convenience macro (type safe).
59 */
60 #define SUPER(__derived) (&(__derived)->base)
61
62
63 struct fenced_manager
64 {
65 struct pb_manager base;
66 struct pb_manager *provider;
67 struct pb_fence_ops *ops;
68
69 /**
70 * Maximum buffer size that can be safely allocated.
71 */
72 pb_size max_buffer_size;
73
74 /**
76 * Maximum CPU memory we can allocate before we start waiting for the
76 * GPU to idle.
77 */
78 pb_size max_cpu_total_size;
79
80 /**
81 * Following members are mutable and protected by this mutex.
82 */
83 pipe_mutex mutex;
84
85 /**
86 * Fenced buffer list.
87 *
88 * All fenced buffers are placed in this list, ordered from the oldest
89 * fence to the newest fence.
90 */
91 struct list_head fenced;
92 pb_size num_fenced;
93
94 struct list_head unfenced;
95 pb_size num_unfenced;
96
97 /**
98 * How much temporary CPU memory is being used to hold unvalidated buffers.
99 */
100 pb_size cpu_total_size;
101 };
102
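/*
 * How a buffer moves between the two lists above, as implemented by
 * fenced_buffer_add_locked() and fenced_buffer_remove_locked() below
 * (informal sketch):
 *
 *    unfenced --> fenced      when a fence is attached via pb_fence()
 *    fenced --> unfenced      when that fence expires or is waited upon
 */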
103
104 /**
105 * Fenced buffer.
106 *
107 * Wrapper around a pipe buffer which adds fencing and reference counting.
108 */
109 struct fenced_buffer
110 {
111 /*
112 * Immutable members.
113 */
114
115 struct pb_buffer base;
116 struct fenced_manager *mgr;
117
118 /*
119 * Following members are mutable and protected by fenced_manager::mutex.
120 */
121
122 struct list_head head;
123
124 /**
125 * Buffer with storage.
126 */
127 struct pb_buffer *buffer;
128 pb_size size;
129 struct pb_desc desc;
130
131 /**
132 * Temporary CPU storage data. Used when there isn't enough GPU memory to
133 * store the buffer.
134 */
135 void *data;
136
137 /**
138 * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
139 * buffer usage.
140 */
141 unsigned flags;
142
143 unsigned mapcount;
144
145 struct pb_validate *vl;
146 unsigned validation_flags;
147
148 struct pipe_fence_handle *fence;
149 };
150
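/*
 * Informal sketch of the storage states a fenced_buffer can be in; after a
 * successful creation at least one of "buffer" and "data" above is non-NULL
 * (see the assert in fenced_bufmgr_create_buffer()):
 *
 *    buffer != NULL, data == NULL    resident in GPU-accessible memory
 *    buffer == NULL, data != NULL    backed only by temporary CPU memory
 *    buffer != NULL, data != NULL    transient, while copying between the two
 */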
151
152 static INLINE struct fenced_manager *
153 fenced_manager(struct pb_manager *mgr)
154 {
155 assert(mgr);
156 return (struct fenced_manager *)mgr;
157 }
158
159
160 static INLINE struct fenced_buffer *
161 fenced_buffer(struct pb_buffer *buf)
162 {
163 assert(buf);
164 return (struct fenced_buffer *)buf;
165 }
166
167
168 static void
169 fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);
170
171 static enum pipe_error
172 fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
173 struct fenced_buffer *fenced_buf);
174
175 static void
176 fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);
177
178 static enum pipe_error
179 fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
180 struct fenced_buffer *fenced_buf,
181 boolean wait);
182
183 static enum pipe_error
184 fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);
185
186 static enum pipe_error
187 fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);
188
189
190 /**
191 * Dump the fenced buffer list.
192 *
193 * Useful to understand failures to allocate buffers.
194 */
195 static void
196 fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
197 {
198 #ifdef DEBUG
199 struct pb_fence_ops *ops = fenced_mgr->ops;
200 struct list_head *curr, *next;
201 struct fenced_buffer *fenced_buf;
202
203 debug_printf("%10s %7s %8s %7s %10s %s\n",
204 "buffer", "size", "refcount", "storage", "fence", "signalled");
205
206 curr = fenced_mgr->unfenced.next;
207 next = curr->next;
208 while(curr != &fenced_mgr->unfenced) {
209 fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
210 assert(!fenced_buf->fence);
211 debug_printf("%10p %7u %8u %7s\n",
212 (void *) fenced_buf,
213 fenced_buf->base.size,
214 p_atomic_read(&fenced_buf->base.reference.count),
215 fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
216 curr = next;
217 next = curr->next;
218 }
219
220 curr = fenced_mgr->fenced.next;
221 next = curr->next;
222 while(curr != &fenced_mgr->fenced) {
223 int signaled;
224 fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
225 assert(fenced_buf->buffer);
226 signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
227 debug_printf("%10p %7u %8u %7s %10p %s\n",
228 (void *) fenced_buf,
229 fenced_buf->base.size,
230 p_atomic_read(&fenced_buf->base.reference.count),
231 "gpu",
232 (void *) fenced_buf->fence,
233 signaled == 0 ? "y" : "n");
234 curr = next;
235 next = curr->next;
236 }
237 #else
238 (void)fenced_mgr;
239 #endif
240 }
241
242
243 static INLINE void
244 fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
245 struct fenced_buffer *fenced_buf)
246 {
247 assert(!pipe_is_referenced(&fenced_buf->base.reference));
248
249 assert(!fenced_buf->fence);
250 assert(fenced_buf->head.prev);
251 assert(fenced_buf->head.next);
252 LIST_DEL(&fenced_buf->head);
253 assert(fenced_mgr->num_unfenced);
254 --fenced_mgr->num_unfenced;
255
256 fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
257 fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
258
259 FREE(fenced_buf);
260 }
261
262
263 /**
264 * Add the buffer to the fenced list.
265 *
266 * Reference count should be incremented before calling this function.
267 */
268 static INLINE void
269 fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
270 struct fenced_buffer *fenced_buf)
271 {
272 assert(pipe_is_referenced(&fenced_buf->base.reference));
273 assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
274 assert(fenced_buf->fence);
275
276 p_atomic_inc(&fenced_buf->base.reference.count);
277
278 LIST_DEL(&fenced_buf->head);
279 assert(fenced_mgr->num_unfenced);
280 --fenced_mgr->num_unfenced;
281 LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
282 ++fenced_mgr->num_fenced;
283 }
284
285
286 /**
287 * Remove the buffer from the fenced list, and potentially destroy the buffer
288 * if the reference count reaches zero.
289 *
290 * Returns TRUE if the buffer was destroyed.
291 */
292 static INLINE boolean
293 fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
294 struct fenced_buffer *fenced_buf)
295 {
296 struct pb_fence_ops *ops = fenced_mgr->ops;
297
298 assert(fenced_buf->fence);
299 assert(fenced_buf->mgr == fenced_mgr);
300
301 ops->fence_reference(ops, &fenced_buf->fence, NULL);
302 fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;
303
304 assert(fenced_buf->head.prev);
305 assert(fenced_buf->head.next);
306
307 LIST_DEL(&fenced_buf->head);
308 assert(fenced_mgr->num_fenced);
309 --fenced_mgr->num_fenced;
310
311 LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
312 ++fenced_mgr->num_unfenced;
313
314 if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
315 fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
316 return TRUE;
317 }
318
319 return FALSE;
320 }
321
322
323 /**
324 * Wait for the fence to expire, and remove the buffer from the fenced list.
325 *
326 * This function will release and re-acquire the mutex, so any copy of mutable
327 * state must be discarded after calling it.
328 */
329 static INLINE enum pipe_error
330 fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
331 struct fenced_buffer *fenced_buf)
332 {
333 struct pb_fence_ops *ops = fenced_mgr->ops;
334 enum pipe_error ret = PIPE_ERROR;
335
336 #if 0
337 debug_warning("waiting for GPU");
338 #endif
339
340 assert(pipe_is_referenced(&fenced_buf->base.reference));
341 assert(fenced_buf->fence);
342
343 if(fenced_buf->fence) {
344 struct pipe_fence_handle *fence = NULL;
345 int finished;
346 boolean proceed;
347
348 ops->fence_reference(ops, &fence, fenced_buf->fence);
349
350 pipe_mutex_unlock(fenced_mgr->mutex);
351
352 finished = ops->fence_finish(ops, fenced_buf->fence, 0);
353
354 pipe_mutex_lock(fenced_mgr->mutex);
355
356 assert(pipe_is_referenced(&fenced_buf->base.reference));
357
358 /*
359 * Only proceed if the fence object didn't change in the meantime.
360 * Otherwise assume the work has already been carried out by another
361 * thread that re-acquired the lock before us.
362 */
363 proceed = fence == fenced_buf->fence ? TRUE : FALSE;
364
365 ops->fence_reference(ops, &fence, NULL);
366
367 if(proceed && finished == 0) {
368 /*
369 * Remove from the fenced list
370 */
371
372 boolean destroyed;
373
374 destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
375
376 /* TODO: remove subsequent buffers with the same fence? */
377
378 assert(!destroyed);
379 (void) destroyed; /* silence unused var warning for non-debug build */
380
381 fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;
382
383 ret = PIPE_OK;
384 }
385 }
386
387 return ret;
388 }
389
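/*
 * Caller pattern for fenced_buffer_finish_locked() (an informal sketch, not
 * code used verbatim in this file): because the mutex is released while
 * waiting, callers must re-test their condition in a loop rather than assume
 * it still holds afterwards, e.g.
 *
 *    while (buffer_is_busy_for(fenced_buf, flags))
 *       fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
 *
 * where buffer_is_busy_for() is a hypothetical name for the inline GPU-usage
 * test that fenced_buffer_map() performs below.
 */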
390
391 /**
392 * Remove as many fenced buffers from the fenced list as possible.
393 *
394 * Returns TRUE if at least one buffer was removed.
395 */
396 static boolean
397 fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
398 boolean wait)
399 {
400 struct pb_fence_ops *ops = fenced_mgr->ops;
401 struct list_head *curr, *next;
402 struct fenced_buffer *fenced_buf;
403 struct pipe_fence_handle *prev_fence = NULL;
404 boolean ret = FALSE;
405
406 curr = fenced_mgr->fenced.next;
407 next = curr->next;
408 while(curr != &fenced_mgr->fenced) {
409 fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
410
411 if(fenced_buf->fence != prev_fence) {
412 int signaled;
413
414 if (wait) {
415 signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
416
417 /*
418 * Don't return just yet. Instead, preemptively check whether the
419 * following buffers' fences have already expired, without further waits.
420 */
421 wait = FALSE;
422 }
423 else {
424 signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
425 }
426
427 if (signaled != 0) {
428 return ret;
429 }
430
431 prev_fence = fenced_buf->fence;
432 }
433 else {
434 /* This buffer's fence object is identical to the previous buffer's
435 * fence object, so no need to check the fence again.
436 */
437 assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
438 }
439
440 fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
441
442 ret = TRUE;
443
444 curr = next;
445 next = curr->next;
446 }
447
448 return ret;
449 }
450
451
452 /**
453 * Try to free some GPU memory by backing it up into CPU memory.
454 *
455 * Returns TRUE if at least one buffer was freed.
456 */
457 static boolean
458 fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
459 {
460 struct list_head *curr, *next;
461 struct fenced_buffer *fenced_buf;
462
463 curr = fenced_mgr->unfenced.next;
464 next = curr->next;
465 while(curr != &fenced_mgr->unfenced) {
466 fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
467
468 /*
469 * We can only move storage if the buffer is not mapped and not
470 * validated.
471 */
472 if(fenced_buf->buffer &&
473 !fenced_buf->mapcount &&
474 !fenced_buf->vl) {
475 enum pipe_error ret;
476
477 ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
478 if(ret == PIPE_OK) {
479 ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
480 if(ret == PIPE_OK) {
481 fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
482 return TRUE;
483 }
484 fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
485 }
486 }
487
488 curr = next;
489 next = curr->next;
490 }
491
492 return FALSE;
493 }
494
495
496 /**
497 * Destroy CPU storage for this buffer.
498 */
499 static void
500 fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
501 {
502 if(fenced_buf->data) {
503 align_free(fenced_buf->data);
504 fenced_buf->data = NULL;
505 assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
506 fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
507 }
508 }
509
510
511 /**
512 * Create CPU storage for this buffer.
513 */
514 static enum pipe_error
515 fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
516 struct fenced_buffer *fenced_buf)
517 {
518 assert(!fenced_buf->data);
519 if(fenced_buf->data)
520 return PIPE_OK;
521
522 if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
523 return PIPE_ERROR_OUT_OF_MEMORY;
524
525 fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
526 if(!fenced_buf->data)
527 return PIPE_ERROR_OUT_OF_MEMORY;
528
529 fenced_mgr->cpu_total_size += fenced_buf->size;
530
531 return PIPE_OK;
532 }
533
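/*
 * Worked example of the budget check above (illustrative numbers): with
 * max_cpu_total_size = 256 MB and cpu_total_size = 250 MB, a request for an
 * 8 MB buffer fails the check (250 + 8 > 256) and PIPE_ERROR_OUT_OF_MEMORY
 * is returned, so callers such as fenced_bufmgr_create_buffer() fall back to
 * waiting for GPU storage instead.
 */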
534
535 /**
536 * Destroy the GPU storage.
537 */
538 static void
539 fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
540 {
541 if(fenced_buf->buffer) {
542 pb_reference(&fenced_buf->buffer, NULL);
543 }
544 }
545
546
547 /**
548 * Try to create GPU storage for this buffer.
549 *
550 * This function is a shorthand around pb_manager::create_buffer for
551 * fenced_buffer_create_gpu_storage_locked()'s benefit.
552 */
553 static INLINE boolean
554 fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
555 struct fenced_buffer *fenced_buf)
556 {
557 struct pb_manager *provider = fenced_mgr->provider;
558
559 assert(!fenced_buf->buffer);
560
561 fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
562 fenced_buf->size,
563 &fenced_buf->desc);
564 return fenced_buf->buffer ? TRUE : FALSE;
565 }
566
567
568 /**
569 * Create GPU storage for this buffer.
570 */
571 static enum pipe_error
572 fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
573 struct fenced_buffer *fenced_buf,
574 boolean wait)
575 {
576 assert(!fenced_buf->buffer);
577
578 /*
579 * Check for signaled buffers before trying to allocate.
580 */
581 fenced_manager_check_signalled_locked(fenced_mgr, FALSE);
582
583 fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
584
585 /*
586 * Keep trying while there is some sort of progress:
587 * - fences are expiring,
588 * - or buffers are being swapped out from GPU memory into CPU memory.
589 */
590 while(!fenced_buf->buffer &&
591 (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
592 fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
593 fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
594 }
595
596 if(!fenced_buf->buffer && wait) {
597 /*
598 * Same as before, but this time around, wait to free buffers if
599 * necessary.
600 */
601 while(!fenced_buf->buffer &&
602 (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
603 fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
604 fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
605 }
606 }
607
608 if(!fenced_buf->buffer) {
609 if(0)
610 fenced_manager_dump_locked(fenced_mgr);
611
612 /* give up */
613 return PIPE_ERROR_OUT_OF_MEMORY;
614 }
615
616 return PIPE_OK;
617 }
618
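/*
 * Note on the loops above (an informal summary of the escalation order used
 * when the provider cannot satisfy the allocation):
 *
 *    1. reap buffers whose fences have already signalled (never blocks),
 *    2. evict idle, unmapped, unvalidated buffers to temporary CPU memory,
 *    3. only if wait is TRUE, block on the oldest outstanding fence and retry.
 */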
619
620 static enum pipe_error
621 fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
622 {
623 uint8_t *map;
624
625 assert(fenced_buf->data);
626 assert(fenced_buf->buffer);
627
628 map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
629 if(!map)
630 return PIPE_ERROR;
631
632 memcpy(map, fenced_buf->data, fenced_buf->size);
633
634 pb_unmap(fenced_buf->buffer);
635
636 return PIPE_OK;
637 }
638
639
640 static enum pipe_error
641 fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
642 {
643 const uint8_t *map;
644
645 assert(fenced_buf->data);
646 assert(fenced_buf->buffer);
647
648 map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ, NULL);
649 if(!map)
650 return PIPE_ERROR;
651
652 memcpy(fenced_buf->data, map, fenced_buf->size);
653
654 pb_unmap(fenced_buf->buffer);
655
656 return PIPE_OK;
657 }
658
659
660 static void
661 fenced_buffer_destroy(struct pb_buffer *buf)
662 {
663 struct fenced_buffer *fenced_buf = fenced_buffer(buf);
664 struct fenced_manager *fenced_mgr = fenced_buf->mgr;
665
666 assert(!pipe_is_referenced(&fenced_buf->base.reference));
667
668 pipe_mutex_lock(fenced_mgr->mutex);
669
670 fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
671
672 pipe_mutex_unlock(fenced_mgr->mutex);
673 }
674
675
676 static void *
677 fenced_buffer_map(struct pb_buffer *buf,
678 unsigned flags, void *flush_ctx)
679 {
680 struct fenced_buffer *fenced_buf = fenced_buffer(buf);
681 struct fenced_manager *fenced_mgr = fenced_buf->mgr;
682 struct pb_fence_ops *ops = fenced_mgr->ops;
683 void *map = NULL;
684
685 pipe_mutex_lock(fenced_mgr->mutex);
686
687 assert(!(flags & PB_USAGE_GPU_READ_WRITE));
688
689 /*
690 * Serialize writes.
691 */
692 while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
693 ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
694 (flags & PB_USAGE_CPU_WRITE))) {
695
696 /*
697 * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
698 */
699 if((flags & PB_USAGE_DONTBLOCK) &&
700 ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
701 goto done;
702 }
703
704 if (flags & PB_USAGE_UNSYNCHRONIZED) {
705 break;
706 }
707
708 /*
709 * Wait for the GPU to finish accessing. This will release and re-acquire
710 * the mutex, so all copies of mutable state must be discarded.
711 */
712 fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
713 }
714
715 if(fenced_buf->buffer) {
716 map = pb_map(fenced_buf->buffer, flags, flush_ctx);
717 }
718 else {
719 assert(fenced_buf->data);
720 map = fenced_buf->data;
721 }
722
723 if(map) {
724 ++fenced_buf->mapcount;
725 fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
726 }
727
728 done:
729 pipe_mutex_unlock(fenced_mgr->mutex);
730
731 return map;
732 }
733
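/*
 * Usage sketch from a caller's point of view (illustrative only; "buf",
 * "data" and "size" are hypothetical names for a buffer created by this
 * manager and a CPU source to upload). The first map attempt is
 * non-blocking; if it returns NULL because the GPU still owns the buffer,
 * the second attempt blocks until the fence expires:
 *
 *    void *ptr = pb_map(buf, PB_USAGE_CPU_WRITE | PB_USAGE_DONTBLOCK, NULL);
 *    if (!ptr)
 *       ptr = pb_map(buf, PB_USAGE_CPU_WRITE, NULL);
 *    if (ptr) {
 *       memcpy(ptr, data, size);
 *       pb_unmap(buf);
 *    }
 */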
734
735 static void
736 fenced_buffer_unmap(struct pb_buffer *buf)
737 {
738 struct fenced_buffer *fenced_buf = fenced_buffer(buf);
739 struct fenced_manager *fenced_mgr = fenced_buf->mgr;
740
741 pipe_mutex_lock(fenced_mgr->mutex);
742
743 assert(fenced_buf->mapcount);
744 if(fenced_buf->mapcount) {
745 if (fenced_buf->buffer)
746 pb_unmap(fenced_buf->buffer);
747 --fenced_buf->mapcount;
748 if(!fenced_buf->mapcount)
749 fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
750 }
751
752 pipe_mutex_unlock(fenced_mgr->mutex);
753 }
754
755
756 static enum pipe_error
757 fenced_buffer_validate(struct pb_buffer *buf,
758 struct pb_validate *vl,
759 unsigned flags)
760 {
761 struct fenced_buffer *fenced_buf = fenced_buffer(buf);
762 struct fenced_manager *fenced_mgr = fenced_buf->mgr;
763 enum pipe_error ret;
764
765 pipe_mutex_lock(fenced_mgr->mutex);
766
767 if(!vl) {
768 /* invalidate */
769 fenced_buf->vl = NULL;
770 fenced_buf->validation_flags = 0;
771 ret = PIPE_OK;
772 goto done;
773 }
774
775 assert(flags & PB_USAGE_GPU_READ_WRITE);
776 assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
777 flags &= PB_USAGE_GPU_READ_WRITE;
778
779 /* Buffer cannot be validated in two different lists */
780 if(fenced_buf->vl && fenced_buf->vl != vl) {
781 ret = PIPE_ERROR_RETRY;
782 goto done;
783 }
784
785 if(fenced_buf->vl == vl &&
786 (fenced_buf->validation_flags & flags) == flags) {
787 /* Nothing to do -- buffer already validated */
788 ret = PIPE_OK;
789 goto done;
790 }
791
792 /*
793 * Create and update GPU storage.
794 */
795 if(!fenced_buf->buffer) {
796 assert(!fenced_buf->mapcount);
797
798 ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
799 if(ret != PIPE_OK) {
800 goto done;
801 }
802
803 ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
804 if(ret != PIPE_OK) {
805 fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
806 goto done;
807 }
808
809 if(fenced_buf->mapcount) {
810 debug_printf("warning: validating a buffer while it is still mapped\n");
811 }
812 else {
813 fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
814 }
815 }
816
817 ret = pb_validate(fenced_buf->buffer, vl, flags);
818 if (ret != PIPE_OK)
819 goto done;
820
821 fenced_buf->vl = vl;
822 fenced_buf->validation_flags |= flags;
823
824 done:
825 pipe_mutex_unlock(fenced_mgr->mutex);
826
827 return ret;
828 }
829
830
831 static void
832 fenced_buffer_fence(struct pb_buffer *buf,
833 struct pipe_fence_handle *fence)
834 {
835 struct fenced_buffer *fenced_buf = fenced_buffer(buf);
836 struct fenced_manager *fenced_mgr = fenced_buf->mgr;
837 struct pb_fence_ops *ops = fenced_mgr->ops;
838
839 pipe_mutex_lock(fenced_mgr->mutex);
840
841 assert(pipe_is_referenced(&fenced_buf->base.reference));
842 assert(fenced_buf->buffer);
843
844 if(fence != fenced_buf->fence) {
845 assert(fenced_buf->vl);
846 assert(fenced_buf->validation_flags);
847
848 if (fenced_buf->fence) {
849 boolean destroyed;
850 destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
851 assert(!destroyed);
852 }
853 if (fence) {
854 ops->fence_reference(ops, &fenced_buf->fence, fence);
855 fenced_buf->flags |= fenced_buf->validation_flags;
856 fenced_buffer_add_locked(fenced_mgr, fenced_buf);
857 }
858
859 pb_fence(fenced_buf->buffer, fence);
860
861 fenced_buf->vl = NULL;
862 fenced_buf->validation_flags = 0;
863 }
864
865 pipe_mutex_unlock(fenced_mgr->mutex);
866 }
867
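/*
 * How the two entry points above fit into a driver's command submission path
 * (informal sketch; "vl" is the driver's validation list and "fence" the
 * handle obtained from the winsys at flush time). pb_validate() and
 * pb_fence() dispatch through the vtbl to fenced_buffer_validate() and
 * fenced_buffer_fence() respectively:
 *
 *    pb_validate(buf, vl, PB_USAGE_GPU_READ);
 *    ... emit commands that reference buf ...
 *    ... flush; obtain a struct pipe_fence_handle *fence from the winsys ...
 *    pb_fence(buf, fence);
 *
 * Until that fence expires the buffer stays on the fenced list, and CPU maps
 * of it will either wait or fail when PB_USAGE_DONTBLOCK is requested.
 */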
868
869 static void
870 fenced_buffer_get_base_buffer(struct pb_buffer *buf,
871 struct pb_buffer **base_buf,
872 pb_size *offset)
873 {
874 struct fenced_buffer *fenced_buf = fenced_buffer(buf);
875 struct fenced_manager *fenced_mgr = fenced_buf->mgr;
876
877 pipe_mutex_lock(fenced_mgr->mutex);
878
879 /*
880 * This should only be called when the buffer is validated. Typically
881 * when processing relocations.
882 */
883 assert(fenced_buf->vl);
884 assert(fenced_buf->buffer);
885
886 if(fenced_buf->buffer)
887 pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
888 else {
889 *base_buf = buf;
890 *offset = 0;
891 }
892
893 pipe_mutex_unlock(fenced_mgr->mutex);
894 }
895
896
897 static const struct pb_vtbl
898 fenced_buffer_vtbl = {
899 fenced_buffer_destroy,
900 fenced_buffer_map,
901 fenced_buffer_unmap,
902 fenced_buffer_validate,
903 fenced_buffer_fence,
904 fenced_buffer_get_base_buffer
905 };
906
907
908 /**
909 * Wrap a buffer in a fenced buffer.
910 */
911 static struct pb_buffer *
912 fenced_bufmgr_create_buffer(struct pb_manager *mgr,
913 pb_size size,
914 const struct pb_desc *desc)
915 {
916 struct fenced_manager *fenced_mgr = fenced_manager(mgr);
917 struct fenced_buffer *fenced_buf;
918 enum pipe_error ret;
919
920 /*
921 * Don't stall the GPU, waste time evicting buffers, or waste memory
922 * trying to create a buffer that will most likely never fit into the
923 * graphics aperture.
924 */
925 if(size > fenced_mgr->max_buffer_size) {
926 goto no_buffer;
927 }
928
929 fenced_buf = CALLOC_STRUCT(fenced_buffer);
930 if(!fenced_buf)
931 goto no_buffer;
932
933 pipe_reference_init(&fenced_buf->base.reference, 1);
934 fenced_buf->base.alignment = desc->alignment;
935 fenced_buf->base.usage = desc->usage;
936 fenced_buf->base.size = size;
937 fenced_buf->size = size;
938 fenced_buf->desc = *desc;
939
940 fenced_buf->base.vtbl = &fenced_buffer_vtbl;
941 fenced_buf->mgr = fenced_mgr;
942
943 pipe_mutex_lock(fenced_mgr->mutex);
944
945 /*
946 * Try to create GPU storage without stalling.
947 */
948 ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
949
950 /*
951 * Attempt to use CPU memory to avoid stalling the GPU.
952 */
953 if(ret != PIPE_OK) {
954 ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
955 }
956
957 /*
958 * Create GPU storage, waiting for some to be available.
959 */
960 if(ret != PIPE_OK) {
961 ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
962 }
963
964 /*
965 * Give up.
966 */
967 if(ret != PIPE_OK) {
968 goto no_storage;
969 }
970
971 assert(fenced_buf->buffer || fenced_buf->data);
972
973 LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
974 ++fenced_mgr->num_unfenced;
975 pipe_mutex_unlock(fenced_mgr->mutex);
976
977 return &fenced_buf->base;
978
979 no_storage:
980 pipe_mutex_unlock(fenced_mgr->mutex);
981 FREE(fenced_buf);
982 no_buffer:
983 return NULL;
984 }
985
986
987 static void
988 fenced_bufmgr_flush(struct pb_manager *mgr)
989 {
990 struct fenced_manager *fenced_mgr = fenced_manager(mgr);
991
992 pipe_mutex_lock(fenced_mgr->mutex);
993 while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
994 ;
995 pipe_mutex_unlock(fenced_mgr->mutex);
996
997 assert(fenced_mgr->provider->flush);
998 if(fenced_mgr->provider->flush)
999 fenced_mgr->provider->flush(fenced_mgr->provider);
1000 }
1001
1002
1003 static void
1004 fenced_bufmgr_destroy(struct pb_manager *mgr)
1005 {
1006 struct fenced_manager *fenced_mgr = fenced_manager(mgr);
1007
1008 pipe_mutex_lock(fenced_mgr->mutex);
1009
1010 /* Wait on outstanding fences */
1011 while (fenced_mgr->num_fenced) {
1012 pipe_mutex_unlock(fenced_mgr->mutex);
1013 #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
1014 sched_yield();
1015 #endif
1016 pipe_mutex_lock(fenced_mgr->mutex);
1017 while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
1018 ;
1019 }
1020
1021 #ifdef DEBUG
1022 /*assert(!fenced_mgr->num_unfenced);*/
1023 #endif
1024
1025 pipe_mutex_unlock(fenced_mgr->mutex);
1026 pipe_mutex_destroy(fenced_mgr->mutex);
1027
1028 if(fenced_mgr->provider)
1029 fenced_mgr->provider->destroy(fenced_mgr->provider);
1030
1031 fenced_mgr->ops->destroy(fenced_mgr->ops);
1032
1033 FREE(fenced_mgr);
1034 }
1035
1036
1037 struct pb_manager *
1038 fenced_bufmgr_create(struct pb_manager *provider,
1039 struct pb_fence_ops *ops,
1040 pb_size max_buffer_size,
1041 pb_size max_cpu_total_size)
1042 {
1043 struct fenced_manager *fenced_mgr;
1044
1045 if(!provider)
1046 return NULL;
1047
1048 fenced_mgr = CALLOC_STRUCT(fenced_manager);
1049 if (!fenced_mgr)
1050 return NULL;
1051
1052 fenced_mgr->base.destroy = fenced_bufmgr_destroy;
1053 fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
1054 fenced_mgr->base.flush = fenced_bufmgr_flush;
1055
1056 fenced_mgr->provider = provider;
1057 fenced_mgr->ops = ops;
1058 fenced_mgr->max_buffer_size = max_buffer_size;
1059 fenced_mgr->max_cpu_total_size = max_cpu_total_size;
1060
1061 LIST_INITHEAD(&fenced_mgr->fenced);
1062 fenced_mgr->num_fenced = 0;
1063
1064 LIST_INITHEAD(&fenced_mgr->unfenced);
1065 fenced_mgr->num_unfenced = 0;
1066
1067 pipe_mutex_init(fenced_mgr->mutex);
1068
1069 return &fenced_mgr->base;
1070 }
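

/*
 * Creation sketch (illustrative; "winsys_mgr" and "winsys_fence_ops" are
 * hypothetical objects that a particular winsys would provide, and the
 * limits are arbitrary example values: max_buffer_size = 16 MB,
 * max_cpu_total_size = 256 MB):
 *
 *    struct pb_manager *mgr;
 *    struct pb_desc desc;
 *    struct pb_buffer *buf;
 *
 *    mgr = fenced_bufmgr_create(winsys_mgr, winsys_fence_ops,
 *                               16 * 1024 * 1024,
 *                               256 * 1024 * 1024);
 *
 *    memset(&desc, 0, sizeof desc);
 *    desc.alignment = 64;
 *    desc.usage = PB_USAGE_GPU_READ_WRITE;
 *    buf = mgr->create_buffer(mgr, 4096, &desc);
 *    ...
 *    pb_reference(&buf, NULL);
 *    mgr->destroy(mgr);
 */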