/**************************************************************************
 *
 * Copyright 2007-2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
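
/*
 * Overview: a fenced buffer wraps an underlying pb_buffer together with the
 * fence of the last command batch that referenced it.  While that fence is
 * pending the wrapper keeps an extra reference on itself and sits on the
 * manager's "delayed" list, so the storage cannot be destroyed while the GPU
 * may still be using it; once the fence expires (see
 * fenced_buffer_list_check_free below) the extra reference is dropped and
 * the buffer is released normally.
 */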


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"



/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_buffer_list
{
   pipe_mutex mutex;

   struct pb_fence_ops *ops;

   pb_size numDelayed;
   struct list_head delayed;

#ifdef DEBUG
   pb_size numUnfenced;
   struct list_head unfenced;
#endif
};


/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct pb_buffer *buffer;
   struct fenced_buffer_list *list;

   /**
    * Protected by fenced_buffer_list::mutex
    */
   struct list_head head;

   /**
    * Following members are mutable and protected by this mutex.
    *
    * You may lock this mutex alone, or lock it with fenced_buffer_list::mutex
    * held, but in order to prevent deadlocks you must never lock
    * fenced_buffer_list::mutex with this mutex held.
    */
   pipe_mutex mutex;

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;
   struct pb_validate *vl;
   unsigned validation_flags;
   struct pipe_fence_handle *fence;
};
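
/*
 * Illustrative sketch (not part of the build): the only lock orders that the
 * comments above allow.  Taking fenced_buffer_list::mutex while already
 * holding fenced_buffer::mutex would invert the order and risk deadlock,
 * which is why fenced_buffer_finish_locked() below drops the buffer mutex
 * before taking the list mutex.
 */
#if 0
static void
example_lock_orders(struct fenced_buffer_list *fenced_list,
                    struct fenced_buffer *fenced_buf)
{
   /* OK: the buffer mutex alone */
   pipe_mutex_lock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_buf->mutex);

   /* OK: list mutex first, then buffer mutex */
   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_list->mutex);

   /* NOT OK: never take the list mutex while the buffer mutex is held */
}
#endif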


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


/**
 * Add the buffer to the fenced list.
 *
 * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this
 * order before calling this function.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_buffer_list *fenced_list,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   /* TODO: Move the reference count increment here */

#ifdef DEBUG
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#endif
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}


/**
 * Remove the buffer from the fenced list.
 *
 * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this
 * order before calling this function.
 *
 * Reference count should be decremented after calling this function.
 */
static INLINE void
fenced_buffer_remove_locked(struct fenced_buffer_list *fenced_list,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->list == fenced_list);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;

#ifdef DEBUG
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
#endif

   /* TODO: Move the reference count decrement and destruction here */
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * fenced_buffer::mutex must be held. fenced_buffer_list::mutex must not be
 * held -- it will be acquired and released inside this function. Note that
 * fenced_buffer::mutex is also briefly dropped and re-acquired in order to
 * honour the lock order, so any cached mutable state may be stale on return.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_buffer_list *fenced_list,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   /*
    * Temporarily release the buffer mutex so that the global list mutex can
    * be acquired first, honouring the documented lock order (list mutex
    * before buffer mutex), then re-acquire the buffer mutex.
    */
   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   if(fenced_buf->fence) {
      if(ops->fence_finish(ops, fenced_buf->fence, 0) == 0) {
         /* Remove from the fenced list */
         /* TODO: remove consequents */
         fenced_buffer_remove_locked(fenced_list, fenced_buf);

         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   pipe_mutex_unlock(fenced_list->mutex);

   return ret;
}


/**
 * Free as many fenced buffers from the list head as possible.
 */
static void
fenced_buffer_list_check_free_locked(struct fenced_buffer_list *fenced_list,
                                     int wait)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      pipe_mutex_lock(fenced_buf->mutex);

      if(fenced_buf->fence != prev_fence) {
         int signaled;
         if (wait)
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
         else
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         if (signaled != 0) {
            pipe_mutex_unlock(fenced_buf->mutex);
            break;
         }
         prev_fence = fenced_buf->fence;
      }
      else {
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_list, fenced_buf);
      pipe_mutex_unlock(fenced_buf->mutex);

      pb_reference((struct pb_buffer **)&fenced_buf, NULL);

      curr = next;
      next = curr->next;
   }
}


static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(!fenced_buf->fence);

#ifdef DEBUG
   pipe_mutex_lock(fenced_list->mutex);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);
#else
   (void)fenced_list;
#endif

   pb_reference(&fenced_buf->buffer, NULL);

   pipe_mutex_destroy(fenced_buf->mutex);
   FREE(fenced_buf);
}


static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pb_fence_ops *ops = fenced_list->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_buf->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /* Serialize writes */
   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         /* The caller does not want to block and the GPU has not finished
          * writing yet (fence_signalled returns 0 once signalled), so bail
          * out instead of waiting. */
         goto finish;
      }

      /* Wait for the GPU to finish writing */
      fenced_buffer_finish_locked(fenced_list, fenced_buf);
   }

#if 0
   /* Check for CPU write access (read is OK) */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* this is legal -- just for debugging */
      debug_warning("concurrent CPU writes");
   }
#endif

   map = pb_map(fenced_buf->buffer, flags);
   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

finish:
   pipe_mutex_unlock(fenced_buf->mutex);

   return map;
}
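
/*
 * Illustrative sketch (not part of the build): how a caller can combine
 * PIPE_BUFFER_USAGE_DONTBLOCK with a fenced buffer.  pb_map() dispatches to
 * fenced_buffer_map() above, which returns NULL instead of stalling when the
 * GPU is still writing to the buffer.  example_try_map_for_write() is a
 * hypothetical helper, not part of this module.
 */
#if 0
static boolean
example_try_map_for_write(struct pb_buffer *buf, void **ptr)
{
   *ptr = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE |
                      PIPE_BUFFER_USAGE_DONTBLOCK);
   if(!*ptr)
      return FALSE;   /* still busy -- caller may retry or allocate elsewhere */
   return TRUE;
}
#endif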


static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);

   pipe_mutex_lock(fenced_buf->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_buf->mutex);
}


static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   enum pipe_error ret;

   pipe_mutex_lock(fenced_buf->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto finish;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto finish;
   }

#if 0
   /* Do not validate if buffer is still mapped */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* TODO: wait for the thread that mapped the buffer to unmap it */
      ret = PIPE_ERROR_RETRY;
      goto finish;
   }
   /* Final sanity checking */
   assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
   assert(!fenced_buf->mapcount);
#endif

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto finish;
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto finish;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

finish:
   pipe_mutex_unlock(fenced_buf->mutex);

   return ret;
}


static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf;
   struct fenced_buffer_list *fenced_list;
   struct pb_fence_ops *ops;

   fenced_buf = fenced_buffer(buf);
   fenced_list = fenced_buf->list;
   ops = fenced_list->ops;

   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         fenced_buffer_remove_locked(fenced_list, fenced_buf);
         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         p_atomic_inc(&fenced_buf->base.base.reference.count);
         fenced_buffer_add_locked(fenced_list, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_list->mutex);
}
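
/*
 * Illustrative sketch (not part of the build): the validate/fence protocol a
 * winsys is expected to follow around command submission, as enforced by the
 * asserts in fenced_buffer_fence() above.  example_submit(), emit_commands()
 * and create_fence() are placeholders for winsys-specific code.
 */
#if 0
static void
example_submit(struct pb_validate *vl,
               struct pb_buffer **buffers, unsigned count)
{
   struct pipe_fence_handle *fence;
   unsigned i;

   /* Declare how the GPU will use each buffer in this batch */
   for(i = 0; i < count; ++i) {
      if(pb_validate(buffers[i], vl, PIPE_BUFFER_USAGE_GPU_READ) != PIPE_OK)
         return;   /* a real winsys would flush and retry here */
   }

   emit_commands();          /* placeholder: emit the command stream */
   fence = create_fence();   /* placeholder: fence the submitted batch */

   /* Attach the fence; each buffer moves onto the delayed list until the
    * fence expires */
   for(i = 0; i < count; ++i)
      pb_fence(buffers[i], fence);
}
#endif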


static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   /* NOTE: accesses immutable members only -- mutex not necessary */
   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
   fenced_buffer_destroy,
   fenced_buffer_map,
   fenced_buffer_unmap,
   fenced_buffer_validate,
   fenced_buffer_fence,
   fenced_buffer_get_base_buffer
};


struct pb_buffer *
fenced_buffer_create(struct fenced_buffer_list *fenced_list,
                     struct pb_buffer *buffer)
{
   struct fenced_buffer *buf;

   if(!buffer)
      return NULL;

   buf = CALLOC_STRUCT(fenced_buffer);
   if(!buf) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = buffer->base.alignment;
   buf->base.base.usage = buffer->base.usage;
   buf->base.base.size = buffer->base.size;

   buf->base.vtbl = &fenced_buffer_vtbl;
   buf->buffer = buffer;
   buf->list = fenced_list;

   pipe_mutex_init(buf->mutex);

#ifdef DEBUG
   pipe_mutex_lock(fenced_list->mutex);
   LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);
#endif

   return &buf->base;
}


struct fenced_buffer_list *
fenced_buffer_list_create(struct pb_fence_ops *ops)
{
   struct fenced_buffer_list *fenced_list;

   fenced_list = CALLOC_STRUCT(fenced_buffer_list);
   if (!fenced_list)
      return NULL;

   fenced_list->ops = ops;

   LIST_INITHEAD(&fenced_list->delayed);
   fenced_list->numDelayed = 0;

#ifdef DEBUG
   LIST_INITHEAD(&fenced_list->unfenced);
   fenced_list->numUnfenced = 0;
#endif

   pipe_mutex_init(fenced_list->mutex);

   return fenced_list;
}
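
/*
 * Illustrative sketch (not part of the build): typical lifetime of a fenced
 * buffer list in a winsys.  example_winsys_usage() is a hypothetical helper;
 * the ops and buf arguments stand in for driver-specific fence ops and a
 * buffer obtained from some provider.
 */
#if 0
static void
example_winsys_usage(struct pb_fence_ops *ops, struct pb_buffer *buf)
{
   struct fenced_buffer_list *fenced_list;
   struct pb_buffer *fenced_buf;

   fenced_list = fenced_buffer_list_create(ops);

   /* Wrap the provider buffer; fenced_buffer_create() takes ownership of the
    * reference passed in. */
   fenced_buf = fenced_buffer_create(fenced_list, buf);

   /* ... map/validate/fence fenced_buf as shown in the sketches above ... */

   /* Drop our reference; actual destruction is delayed while a fence is
    * still pending. */
   pb_reference(&fenced_buf, NULL);

   /* Reclaim buffers whose fences have expired (wait = 0: don't block) */
   fenced_buffer_list_check_free(fenced_list, 0);

   fenced_buffer_list_destroy(fenced_list);
}
#endif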


void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                              int wait)
{
   pipe_mutex_lock(fenced_list->mutex);
   fenced_buffer_list_check_free_locked(fenced_list, wait);
   pipe_mutex_unlock(fenced_list->mutex);
}


#ifdef DEBUG
void
fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   pipe_mutex_lock(fenced_list->mutex);

   debug_printf("%10s %7s %7s %10s %s\n",
                "buffer", "size", "refcount", "fence", "signalled");

   curr = fenced_list->unfenced.next;
   next = curr->next;
   while(curr != &fenced_list->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      pipe_mutex_lock(fenced_buf->mutex);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %7u\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count));
      pipe_mutex_unlock(fenced_buf->mutex);
      curr = next;
      next = curr->next;
   }

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      pipe_mutex_lock(fenced_buf->mutex);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %7u %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      pipe_mutex_unlock(fenced_buf->mutex);
      curr = next;
      next = curr->next;
   }

   pipe_mutex_unlock(fenced_list->mutex);
}
#endif


void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
   pipe_mutex_lock(fenced_list->mutex);

   /* Wait on outstanding fences */
   while (fenced_list->numDelayed) {
      pipe_mutex_unlock(fenced_list->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_list->mutex);
      fenced_buffer_list_check_free_locked(fenced_list, 1);
   }

#ifdef DEBUG
   /*assert(!fenced_list->numUnfenced);*/
#endif

   pipe_mutex_unlock(fenced_list->mutex);
   pipe_mutex_destroy(fenced_list->mutex);

   fenced_list->ops->destroy(fenced_list->ops);

   FREE(fenced_list);
}