mesa.git: src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
/**************************************************************************
 *
 * Copyright 2007-2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

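/*
 * Rough usage sketch (illustrative only -- "ops", "provider_buf" and "fence"
 * stand for objects supplied by the winsys/caller, they are not defined in
 * this file):
 *
 *    struct fenced_buffer_list *list = fenced_buffer_list_create(ops);
 *    struct pb_buffer *buf = fenced_buffer_create(list, provider_buf);
 *
 *    ... validate the buffer, build and submit a command batch ...
 *
 *    pb_fence(buf, fence);                    associate the batch fence
 *    fenced_buffer_list_check_free(list, 0);  reclaim buffers whose fence
 *                                             has already signalled
 *    ...
 *    fenced_buffer_list_destroy(list);        waits for outstanding fences
 */
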
#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"



/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_buffer_list
{
   pipe_mutex mutex;

   struct pb_fence_ops *ops;

   pb_size numDelayed;
   struct list_head delayed;

#ifdef DEBUG
   pb_size numUnfenced;
   struct list_head unfenced;
#endif
};


/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct pb_buffer *buffer;
   struct fenced_buffer_list *list;

   /**
    * Protected by fenced_buffer_list::mutex
    */
   struct list_head head;

   /**
    * Following members are mutable and protected by this mutex.
    *
    * You may lock this mutex alone, or lock it with fenced_buffer_list::mutex
    * held, but in order to prevent deadlocks you must never lock
    * fenced_buffer_list::mutex with this mutex held.
    */
   pipe_mutex mutex;

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;
   struct pb_validate *vl;
   unsigned validation_flags;
   struct pipe_fence_handle *fence;
};


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


/**
 * Add the buffer to the fenced list.
 *
 * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this
 * order, before calling this function.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_buffer_list *fenced_list,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   /* TODO: Move the reference count increment here */

#ifdef DEBUG
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#endif
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}


/**
 * Remove the buffer from the fenced list.
 *
 * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this
 * order before calling this function.
 *
 * Reference count should be decremented after calling this function.
 */
static INLINE void
fenced_buffer_remove_locked(struct fenced_buffer_list *fenced_list,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->list == fenced_list);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;

#ifdef DEBUG
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
#endif

   /* TODO: Move the reference count decrement and destruction here */
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * fenced_buffer::mutex must be held. fenced_buffer_list::mutex must not be
 * held -- it will be acquired internally.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_buffer_list *fenced_list,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   /*
    * Acquire the global lock. Must release buffer mutex first to preserve
    * lock order.
    */
   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   if(fenced_buf->fence) {
      if(ops->fence_finish(ops, fenced_buf->fence, 0) == 0) {
         /* Remove from the fenced list */
         /* TODO: remove consequents */
         fenced_buffer_remove_locked(fenced_list, fenced_buf);

         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   pipe_mutex_unlock(fenced_list->mutex);

   return ret;
}


/**
 * Free as many fenced buffers from the list head as possible.
 */
static void
fenced_buffer_list_check_free_locked(struct fenced_buffer_list *fenced_list,
                                     int wait)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pb_buffer *pb_buf;
   struct pipe_fence_handle *prev_fence = NULL;

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      pipe_mutex_lock(fenced_buf->mutex);

      if(fenced_buf->fence != prev_fence) {
         int signaled;
         if (wait)
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
         else
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         if (signaled != 0) {
            pipe_mutex_unlock(fenced_buf->mutex);
            break;
         }
         prev_fence = fenced_buf->fence;
      }
      else {
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_list, fenced_buf);
      pipe_mutex_unlock(fenced_buf->mutex);

      pb_buf = &fenced_buf->base;
      pb_reference(&pb_buf, NULL);

      curr = next;
      next = curr->next;
   }
}

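/**
 * pb_vtbl::destroy callback. Called when the reference count drops to zero;
 * the buffer must no longer have a fence or sit on the delayed list at this
 * point.
 */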
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(!fenced_buf->fence);

#ifdef DEBUG
   pipe_mutex_lock(fenced_list->mutex);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);
#else
   (void)fenced_list;
#endif

   pb_reference(&fenced_buf->buffer, NULL);

   pipe_mutex_destroy(fenced_buf->mutex);
   FREE(fenced_buf);
}

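/**
 * pb_vtbl::map callback.
 *
 * Serializes CPU access with pending GPU writes: if the buffer is still busy
 * and PIPE_BUFFER_USAGE_DONTBLOCK is set the map fails, otherwise we wait for
 * the fence before mapping the underlying buffer.
 */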
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pb_fence_ops *ops = fenced_list->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_buf->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /* Serialize writes */
   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         /* Caller doesn't want to block and the GPU hasn't finished yet --
          * fail the map instead of waiting */
         goto done;
      }

      /* Wait for the GPU to finish writing */
      fenced_buffer_finish_locked(fenced_list, fenced_buf);
   }

#if 0
   /* Check for CPU write access (read is OK) */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* this is legal -- just for debugging */
      debug_warning("concurrent CPU writes");
   }
#endif

   map = pb_map(fenced_buf->buffer, flags);
   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_buf->mutex);

   return map;
}

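/**
 * pb_vtbl::unmap callback. Decrements the map count and clears the CPU usage
 * flags once the last mapping goes away.
 */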
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);

   pipe_mutex_lock(fenced_buf->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_buf->mutex);
}

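/**
 * pb_vtbl::validate callback.
 *
 * Adds the underlying buffer to the given validation list and remembers the
 * requested GPU usage flags; a NULL list invalidates any previous state.
 * A buffer can only be validated on one list at a time.
 */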
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   enum pipe_error ret;

   pipe_mutex_lock(fenced_buf->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

#if 0
   /* Do not validate if buffer is still mapped */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* TODO: wait for the thread that mapped the buffer to unmap it */
      ret = PIPE_ERROR_RETRY;
      goto done;
   }
   /* Final sanity checking */
   assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
   assert(!fenced_buf->mapcount);
#endif

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_buf->mutex);

   return ret;
}

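/**
 * pb_vtbl::fence callback.
 *
 * Associates a new fence with the buffer: drops the previous fence (if any),
 * takes an extra reference while the buffer sits on the delayed list, and
 * transfers the accumulated validation flags into the usage flags.
 */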
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf;
   struct fenced_buffer_list *fenced_list;
   struct pb_fence_ops *ops;

   fenced_buf = fenced_buffer(buf);
   fenced_list = fenced_buf->list;
   ops = fenced_list->ops;

   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         fenced_buffer_remove_locked(fenced_list, fenced_buf);
         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         p_atomic_inc(&fenced_buf->base.base.reference.count);
         fenced_buffer_add_locked(fenced_list, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_list->mutex);
}

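/**
 * pb_vtbl::get_base_buffer callback -- forwards to the wrapped buffer.
 */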
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   /* NOTE: accesses immutable members only -- mutex not necessary */
   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
   fenced_buffer_destroy,
   fenced_buffer_map,
   fenced_buffer_unmap,
   fenced_buffer_validate,
   fenced_buffer_fence,
   fenced_buffer_get_base_buffer
};

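/**
 * Wrap a provider buffer in a fenced buffer. Takes ownership of the given
 * buffer reference; on allocation failure the buffer is released and NULL is
 * returned.
 */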
struct pb_buffer *
fenced_buffer_create(struct fenced_buffer_list *fenced_list,
                     struct pb_buffer *buffer)
{
   struct fenced_buffer *buf;

   if(!buffer)
      return NULL;

   buf = CALLOC_STRUCT(fenced_buffer);
   if(!buf) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = buffer->base.alignment;
   buf->base.base.usage = buffer->base.usage;
   buf->base.base.size = buffer->base.size;

   buf->base.vtbl = &fenced_buffer_vtbl;
   buf->buffer = buffer;
   buf->list = fenced_list;

   pipe_mutex_init(buf->mutex);

#ifdef DEBUG
   pipe_mutex_lock(fenced_list->mutex);
   LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);
#endif

   return &buf->base;
}

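/**
 * Create a fenced buffer list. The list takes ownership of the fence ops and
 * releases them in fenced_buffer_list_destroy().
 */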
struct fenced_buffer_list *
fenced_buffer_list_create(struct pb_fence_ops *ops)
{
   struct fenced_buffer_list *fenced_list;

   fenced_list = CALLOC_STRUCT(fenced_buffer_list);
   if (!fenced_list)
      return NULL;

   fenced_list->ops = ops;

   LIST_INITHEAD(&fenced_list->delayed);
   fenced_list->numDelayed = 0;

#ifdef DEBUG
   LIST_INITHEAD(&fenced_list->unfenced);
   fenced_list->numUnfenced = 0;
#endif

   pipe_mutex_init(fenced_list->mutex);

   return fenced_list;
}

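/**
 * Thread-safe wrapper around fenced_buffer_list_check_free_locked(): frees
 * the buffers at the head of the delayed list whose fences have signalled,
 * optionally waiting for them.
 */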
void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                              int wait)
{
   pipe_mutex_lock(fenced_list->mutex);
   fenced_buffer_list_check_free_locked(fenced_list, wait);
   pipe_mutex_unlock(fenced_list->mutex);
}


#ifdef DEBUG
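/**
 * Debug helper: print all buffers on the unfenced and delayed lists together
 * with their reference counts and fence status.
 */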
void
fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   pipe_mutex_lock(fenced_list->mutex);

   debug_printf("%10s %7s %7s %10s %s\n",
                "buffer", "size", "refcount", "fence", "signalled");

   curr = fenced_list->unfenced.next;
   next = curr->next;
   while(curr != &fenced_list->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      pipe_mutex_lock(fenced_buf->mutex);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %7u\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count));
      pipe_mutex_unlock(fenced_buf->mutex);
      curr = next;
      next = curr->next;
   }

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      pipe_mutex_lock(fenced_buf->mutex);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %7u %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      pipe_mutex_unlock(fenced_buf->mutex);
      curr = next;
      next = curr->next;
   }

   pipe_mutex_unlock(fenced_list->mutex);
}
#endif

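/**
 * Destroy a fenced buffer list. Blocks until all delayed buffers have been
 * freed, then destroys the fence ops and the list itself.
 */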
void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
   pipe_mutex_lock(fenced_list->mutex);

   /* Wait on outstanding fences */
   while (fenced_list->numDelayed) {
      pipe_mutex_unlock(fenced_list->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_list->mutex);
      fenced_buffer_list_check_free_locked(fenced_list, 1);
   }

#ifdef DEBUG
   /*assert(!fenced_list->numUnfenced);*/
#endif

   pipe_mutex_unlock(fenced_list->mutex);
   pipe_mutex_destroy(fenced_list->mutex);

   fenced_list->ops->destroy(fenced_list->ops);

   FREE(fenced_list);
}