/**************************************************************************
 *
 * Copyright 2007-2009 VMware, Inc.
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum CPU memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};


/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};


static INLINE struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);


/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#ifdef DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}


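/**
 * Destroy a fenced buffer once its reference count has dropped to zero.
 *
 * Removes the buffer from the unfenced list and releases its GPU and CPU
 * storage. The buffer must have no outstanding fence. Called with
 * fenced_manager::mutex held.
 */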
static INLINE void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}


/**
 * Add the buffer to the fenced list.
 *
 * The fenced list holds a reference to the buffer while its fence is
 * outstanding: the reference count is incremented here and released again
 * in fenced_buffer_remove_locked().
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}


/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static INLINE boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      pipe_mutex_unlock(fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      pipe_mutex_lock(fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.base.reference));

      /*
       * Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has already been carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if(proceed && finished == 0) {
         /*
          * Remove from the fenced list.
          */

         boolean destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}


/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences have already expired, without
             * further waits.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}


/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /*
       * We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if(fenced_buf->buffer &&
         !fenced_buf->mapcount &&
         !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if(ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if(ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}


/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}


/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);
   if(fenced_buf->data)
      return PIPE_OK;

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if(!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}


/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}


/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}


/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signalled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if(!fenced_buf->buffer) {
      if(0)
         fenced_manager_dump_locked(fenced_mgr);

      /* give up */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}


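/**
 * Copy the contents of the temporary CPU storage into the GPU storage.
 *
 * Both storages must exist; the GPU buffer is mapped for CPU writes for the
 * duration of the copy.
 */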
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


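/**
 * Copy the contents of the GPU storage into the temporary CPU storage.
 *
 * Both storages must exist; the GPU buffer is mapped for CPU reads for the
 * duration of the copy.
 */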
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


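/**
 * pb_vtbl::destroy implementation.
 *
 * The buffer must no longer be referenced when this is called (see the
 * assert below).
 */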
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}


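/**
 * pb_vtbl::map implementation.
 *
 * Serializes CPU access against outstanding GPU usage: unless
 * PIPE_BUFFER_USAGE_UNSYNCHRONIZED is given, the call waits until the GPU has
 * finished with the buffer, and returns NULL instead of waiting when
 * PIPE_BUFFER_USAGE_DONTBLOCK is set.
 */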
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) &&
          (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {

      /*
       * Don't wait for the GPU to finish accessing it if blocking is
       * forbidden.
       */
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PIPE_BUFFER_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}


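/**
 * pb_vtbl::unmap implementation.
 *
 * Drops one map reference; the CPU usage flags are cleared once the last
 * mapping goes away.
 */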
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


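/**
 * pb_vtbl::validate implementation.
 *
 * Ensures the buffer has GPU storage (copying any CPU-side contents into it
 * if necessary) and adds it to the given validation list. Passing a NULL
 * list invalidates the buffer.
 */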
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   /*
    * Create and update GPU storage.
    */
   if(!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if(ret != PIPE_OK) {
         goto done;
      }

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if(ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if(fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      }
      else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}


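/**
 * pb_vtbl::fence implementation.
 *
 * Associates a new fence with the buffer, moving it onto the fenced list,
 * and resets the validation state. A NULL fence simply drops the previous
 * fence.
 */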
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


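/**
 * pb_vtbl::get_base_buffer implementation.
 *
 * Returns the underlying provider buffer and offset, used when resolving
 * relocations. Only meaningful while the buffer is validated and has GPU
 * storage.
 */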
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if(fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
   fenced_buffer_destroy,
   fenced_buffer_map,
   fenced_buffer_unmap,
   fenced_buffer_validate,
   fenced_buffer_fence,
   fenced_buffer_get_base_buffer
};


/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if(size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /*
    * Attempt to use CPU memory to avoid stalling the GPU.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /*
    * Create GPU storage, waiting for some to be available.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}


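/**
 * pb_manager::flush implementation.
 *
 * Waits for all outstanding fences to expire and then flushes the provider.
 */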
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}


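/**
 * pb_manager::destroy implementation.
 *
 * Waits for all outstanding fences to expire, then destroys the provider,
 * the fence ops, and the manager itself.
 */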
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /*assert(!fenced_mgr->num_unfenced);*/
#endif

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}


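/**
 * Create a fenced buffer manager on top of a provider.
 *
 * \param provider            manager that supplies the actual GPU storage
 * \param ops                 fence operations used to track GPU completion
 * \param max_buffer_size     largest buffer size that will be attempted
 * \param max_cpu_total_size  cap on temporary CPU memory used for backing
 *                            storage before waiting for the GPU to idle
 *
 * Illustrative usage sketch only -- the provider and fence-ops constructors
 * below are hypothetical placeholders for whatever the winsys supplies, and
 * the size limits are arbitrary example values:
 *
 * \code
 *    struct pb_manager *provider = my_winsys_bufmgr_create(ws);
 *    struct pb_fence_ops *ops = my_winsys_fence_ops_create(ws);
 *    struct pb_manager *mgr = fenced_bufmgr_create(provider, ops,
 *                                                  16 * 1024 * 1024,
 *                                                  32 * 1024 * 1024);
 *    struct pb_desc desc;
 *    struct pb_buffer *buf;
 *
 *    memset(&desc, 0, sizeof desc);
 *    desc.alignment = 64;
 *    buf = mgr->create_buffer(mgr, 4096, &desc);
 * \endcode
 */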
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}