mesa.git: src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
/**************************************************************************
 *
 * Copyright 2007-2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"



/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_buffer_list
{
   pipe_mutex mutex;

   struct pb_fence_ops *ops;

   pb_size numDelayed;
   struct list_head delayed;

#ifdef DEBUG
   pb_size numUnfenced;
   struct list_head unfenced;
#endif
};


/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct pb_buffer *buffer;
   struct fenced_buffer_list *list;

   /**
    * Protected by fenced_buffer_list::mutex
    */
   struct list_head head;

   /**
    * Following members are mutable and protected by this mutex.
    *
    * You may lock this mutex alone, or lock it with fenced_buffer_list::mutex
    * held, but in order to prevent deadlocks you must never lock
    * fenced_buffer_list::mutex with this mutex held.
    */
   pipe_mutex mutex;

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;
   struct pb_validate *vl;
   unsigned validation_flags;
   struct pipe_fence_handle *fence;
};
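
/*
 * The lock ordering rule above in practice: a sketch only, not part of the
 * driver API.  The helper name below is hypothetical; it merely illustrates
 * which orderings are allowed.  Taking fenced_buffer::mutex first and then
 * fenced_buffer_list::mutex would deadlock against a thread doing the
 * opposite, so the list mutex always comes first when both are needed.
 */
#if 0
static void
example_lock_ordering(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   /* OK: the buffer mutex alone. */
   pipe_mutex_lock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_buf->mutex);

   /* OK: the list mutex first, then the buffer mutex. */
   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_list->mutex);

   /* Never: the buffer mutex first, then the list mutex. */
}
#endif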


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


/**
 * Add the buffer to the fenced list.
 *
 * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this
 * order before calling this function.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_buffer_list *fenced_list,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   /* TODO: Move the reference count increment here */

#ifdef DEBUG
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#endif
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}
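
/*
 * Caller-side sketch of the contract above: the reference count is bumped
 * and both mutexes are held (list mutex first) before the buffer is moved
 * onto the delayed list.  fenced_buffer_fence() below is the real caller;
 * this fragment only illustrates the expected ordering.
 */
#if 0
   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   ops->fence_reference(ops, &fenced_buf->fence, fence);
   p_atomic_inc(&fenced_buf->base.base.reference.count);
   fenced_buffer_add_locked(fenced_list, fenced_buf);

   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_list->mutex);
#endif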


/**
 * Remove the buffer from the fenced list.
 *
 * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this
 * order before calling this function.
 *
 * Reference count should be decremented after calling this function.
 */
static INLINE void
fenced_buffer_remove_locked(struct fenced_buffer_list *fenced_list,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->list == fenced_list);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;

#ifdef DEBUG
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
#endif

   /* TODO: Move the reference count decrement and destruction here */
}


/**
 * Wait for the fence to expire, and remove the buffer from the fenced list.
 *
 * fenced_buffer::mutex must be held. fenced_buffer_list::mutex must not be
 * held -- it will be acquired and released by this function.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_buffer_list *fenced_list,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   /* Acquire the global lock */
   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   if(fenced_buf->fence) {
      if(ops->fence_finish(ops, fenced_buf->fence, 0) == 0) {
         /* Remove from the fenced list */
         /* TODO: remove consequents */
         fenced_buffer_remove_locked(fenced_list, fenced_buf);

         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   pipe_mutex_unlock(fenced_list->mutex);

   return ret;
}


/**
 * Free as many fenced buffers from the list head as possible.
 */
static void
fenced_buffer_list_check_free_locked(struct fenced_buffer_list *fenced_list,
                                     int wait)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pb_buffer *pb_buf;
   struct pipe_fence_handle *prev_fence = NULL;

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      pipe_mutex_lock(fenced_buf->mutex);

      if(fenced_buf->fence != prev_fence) {
         int signaled;
         if (wait)
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
         else
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         if (signaled != 0) {
            pipe_mutex_unlock(fenced_buf->mutex);
            break;
         }
         prev_fence = fenced_buf->fence;
      }
      else {
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_list, fenced_buf);
      pipe_mutex_unlock(fenced_buf->mutex);

      pb_buf = &fenced_buf->base;
      pb_reference(&pb_buf, NULL);

      curr = next;
      next = curr->next;
   }
}


static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(!fenced_buf->fence);

#ifdef DEBUG
   pipe_mutex_lock(fenced_list->mutex);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);
#else
   (void)fenced_list;
#endif

   pb_reference(&fenced_buf->buffer, NULL);

   pipe_mutex_destroy(fenced_buf->mutex);
   FREE(fenced_buf);
}


static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pb_fence_ops *ops = fenced_list->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_buf->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /* Serialize writes */
   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         /* Don't wait for the GPU to finish writing */
         goto finish;
      }

      /* Wait for the GPU to finish writing */
      fenced_buffer_finish_locked(fenced_list, fenced_buf);
   }

#if 0
   /* Check for CPU write access (read is OK) */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* this is legal -- just for debugging */
      debug_warning("concurrent CPU writes");
   }
#endif

   map = pb_map(fenced_buf->buffer, flags);
   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

finish:
   pipe_mutex_unlock(fenced_buf->mutex);

   return map;
}


static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);

   pipe_mutex_lock(fenced_buf->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_buf->mutex);
}


static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   enum pipe_error ret;

   pipe_mutex_lock(fenced_buf->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto finish;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto finish;
   }

#if 0
   /* Do not validate if buffer is still mapped */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* TODO: wait for the thread that mapped the buffer to unmap it */
      ret = PIPE_ERROR_RETRY;
      goto finish;
   }
   /* Final sanity checking */
   assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
   assert(!fenced_buf->mapcount);
#endif

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto finish;
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto finish;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

finish:
   pipe_mutex_unlock(fenced_buf->mutex);

   return ret;
}


static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf;
   struct fenced_buffer_list *fenced_list;
   struct pb_fence_ops *ops;

   fenced_buf = fenced_buffer(buf);
   fenced_list = fenced_buf->list;
   ops = fenced_list->ops;

   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         fenced_buffer_remove_locked(fenced_list, fenced_buf);
         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         p_atomic_inc(&fenced_buf->base.base.reference.count);
         fenced_buffer_add_locked(fenced_list, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_list->mutex);
}


static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   /* NOTE: accesses immutable members only -- mutex not necessary */
   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};


struct pb_buffer *
fenced_buffer_create(struct fenced_buffer_list *fenced_list,
                     struct pb_buffer *buffer)
{
   struct fenced_buffer *buf;

   if(!buffer)
      return NULL;

   buf = CALLOC_STRUCT(fenced_buffer);
   if(!buf) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = buffer->base.alignment;
   buf->base.base.usage = buffer->base.usage;
   buf->base.base.size = buffer->base.size;

   buf->base.vtbl = &fenced_buffer_vtbl;
   buf->buffer = buffer;
   buf->list = fenced_list;

   pipe_mutex_init(buf->mutex);

#ifdef DEBUG
   pipe_mutex_lock(fenced_list->mutex);
   LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);
#endif

   return &buf->base;
}


struct fenced_buffer_list *
fenced_buffer_list_create(struct pb_fence_ops *ops)
{
   struct fenced_buffer_list *fenced_list;

   fenced_list = CALLOC_STRUCT(fenced_buffer_list);
   if (!fenced_list)
      return NULL;

   fenced_list->ops = ops;

   LIST_INITHEAD(&fenced_list->delayed);
   fenced_list->numDelayed = 0;

#ifdef DEBUG
   LIST_INITHEAD(&fenced_list->unfenced);
   fenced_list->numUnfenced = 0;
#endif

   pipe_mutex_init(fenced_list->mutex);

   return fenced_list;
}
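
/*
 * Typical life cycle of the public API, as a sketch only.  The function name
 * and the ops/buffer/validation-list/fence parameters are placeholders for
 * whatever the driver and its buffer provider supply; they are not symbols
 * defined in this file.
 */
#if 0
static void
example_usage(struct pb_fence_ops *ops,          /* driver-provided fence ops */
              struct pb_buffer *underlying,      /* buffer from some pb_manager */
              struct pb_validate *vl,            /* current validation list */
              struct pipe_fence_handle *fence)   /* fence for the last batch */
{
   struct fenced_buffer_list *fenced_list;
   struct pb_buffer *buf;
   void *map;

   /* Wrap the underlying buffer; the wrapper now owns its reference. */
   fenced_list = fenced_buffer_list_create(ops);
   buf = fenced_buffer_create(fenced_list, underlying);

   /* CPU access is serialized against pending GPU writes by pb_map(). */
   map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
   if (map)
      pb_unmap(buf);

   /* GPU access: validate, emit commands, then attach the batch fence. */
   pb_validate(buf, vl, PIPE_BUFFER_USAGE_GPU_READ);
   pb_fence(buf, fence);

   /* Drop our reference; destruction is delayed until the fence expires. */
   pb_reference(&buf, NULL);
   fenced_buffer_list_check_free(fenced_list, 0);

   /* Waits for all outstanding fences, then frees the list and the ops. */
   fenced_buffer_list_destroy(fenced_list);
}
#endif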


void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                              int wait)
{
   pipe_mutex_lock(fenced_list->mutex);
   fenced_buffer_list_check_free_locked(fenced_list, wait);
   pipe_mutex_unlock(fenced_list->mutex);
}


#ifdef DEBUG
void
fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   pipe_mutex_lock(fenced_list->mutex);

   debug_printf("%10s %7s %7s %10s %s\n",
                "buffer", "size", "refcount", "fence", "signalled");

   curr = fenced_list->unfenced.next;
   next = curr->next;
   while(curr != &fenced_list->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      pipe_mutex_lock(fenced_buf->mutex);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %7u\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count));
      pipe_mutex_unlock(fenced_buf->mutex);
      curr = next;
      next = curr->next;
   }

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      pipe_mutex_lock(fenced_buf->mutex);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %7u %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      pipe_mutex_unlock(fenced_buf->mutex);
      curr = next;
      next = curr->next;
   }

   pipe_mutex_unlock(fenced_list->mutex);
}
#endif


void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
   pipe_mutex_lock(fenced_list->mutex);

   /* Wait on outstanding fences */
   while (fenced_list->numDelayed) {
      pipe_mutex_unlock(fenced_list->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_list->mutex);
      fenced_buffer_list_check_free_locked(fenced_list, 1);
   }

#ifdef DEBUG
   /*assert(!fenced_list->numUnfenced);*/
#endif

   pipe_mutex_unlock(fenced_list->mutex);
   pipe_mutex_destroy(fenced_list->mutex);

   fenced_list->ops->destroy(fenced_list->ops);

   FREE(fenced_list);
}