/**************************************************************************
 *
 * Copyright 2007-2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"

/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)

struct fenced_buffer_list
{
   pipe_mutex mutex;

   struct pb_fence_ops *ops;

   /* Buffers with a pending fence, oldest fence first */
   pb_size numDelayed;
   struct list_head delayed;

   /* Buffers not (yet) associated with a fence */
   pb_size numUnfenced;
   struct list_head unfenced;
};

/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members, set at creation time.
    */

   struct pb_buffer base;
   struct pb_buffer *buffer;
   struct fenced_buffer_list *list;

   /**
    * Protected by fenced_buffer_list::mutex
    */
   struct list_head head;

   /**
    * Following members are mutable and protected by this mutex.
    *
    * You may lock this mutex alone, or lock it with fenced_buffer_list::mutex
    * held, but in order to prevent deadlocks you must never lock
    * fenced_buffer_list::mutex with this mutex held.
    */
   pipe_mutex mutex;

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};

static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   return (struct fenced_buffer *)buf;
}

/**
 * Add the buffer to the fenced list.
 *
 * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this
 * order, before calling this function.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_buffer_list *fenced_list,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   /* TODO: Move the reference count increment here */

   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}

/**
 * Remove the buffer from the fenced list.
 *
 * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in this
 * order, before calling this function.
 *
 * Reference count should be decremented after calling this function.
 */
static INLINE void
fenced_buffer_remove_locked(struct fenced_buffer_list *fenced_list,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->list == fenced_list);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;

   /* TODO: Move the reference count decrement and destruction here */
}

/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * fenced_buffer::mutex must be held. fenced_buffer_list::mutex must not be
 * held when calling this function.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_buffer_list *fenced_list,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   enum pipe_error ret = PIPE_ERROR;

   debug_warning("waiting for GPU");

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   /* Acquire the global lock. To respect the locking order, the buffer mutex
    * must be released before taking fenced_buffer_list::mutex, and re-taken
    * afterwards. */
   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   if(fenced_buf->fence) {
      if(ops->fence_finish(ops, fenced_buf->fence, 0) == 0) {
         /* Remove from the fenced list */
         /* TODO: remove consequents */
         fenced_buffer_remove_locked(fenced_list, fenced_buf);

         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   pipe_mutex_unlock(fenced_list->mutex);

   return ret;
}

/**
 * Free as many fenced buffers from the list head as possible.
 */
static void
fenced_buffer_list_check_free_locked(struct fenced_buffer_list *fenced_list,
                                     int wait)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pb_buffer *pb_buf;
   struct pipe_fence_handle *prev_fence = NULL;

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      pipe_mutex_lock(fenced_buf->mutex);

      if(fenced_buf->fence != prev_fence) {
         int signaled;
         if(wait)
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
         else
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         if(signaled != 0) {
            /* Buffers are fenced in order, so no later buffer can be freed either */
            pipe_mutex_unlock(fenced_buf->mutex);
            break;
         }
         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer shares the previous, already signalled, fence */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_list, fenced_buf);
      pipe_mutex_unlock(fenced_buf->mutex);

      pb_buf = &fenced_buf->base;
      pb_reference(&pb_buf, NULL);

      curr = next;
      next = curr->next;
   }
}

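/**
 * pb_buffer destroy callback.
 *
 * Only reached once the reference count has dropped to zero and no fence is
 * pending; releases the underlying buffer and the per-buffer mutex.
 */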
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(!fenced_buf->fence);

   pipe_mutex_lock(fenced_list->mutex);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);

   pb_reference(&fenced_buf->buffer, NULL);

   pipe_mutex_destroy(fenced_buf->mutex);
   FREE(fenced_buf);
}

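/**
 * Map the underlying buffer for CPU access.
 *
 * Serializes against pending GPU access: waits on the buffer's fence, or
 * bails out early when PIPE_BUFFER_USAGE_DONTBLOCK is set and the fence has
 * not been signalled yet.
 */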
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pb_fence_ops *ops = fenced_list->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_buf->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /* Serialize writes */
   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         /* Don't wait for the GPU to finish writing */
         goto finish;
      }

      /* Wait for the GPU to finish writing */
      fenced_buffer_finish_locked(fenced_list, fenced_buf);
   }

   /* Check for CPU write access (read is OK) */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* this is legal -- just for debugging */
      debug_warning("concurrent CPU writes");
   }

   map = pb_map(fenced_buf->buffer, flags);
   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

finish:
   pipe_mutex_unlock(fenced_buf->mutex);

   return map;
}

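/**
 * Unmap the underlying buffer, clearing the CPU usage flags once the last
 * mapping goes away.
 */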
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);

   pipe_mutex_lock(fenced_buf->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_buf->mutex);
}

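/**
 * Add the underlying buffer to a validation list, or clear the pending
 * validation state when vl is NULL.
 */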
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   enum pipe_error ret;

   pipe_mutex_lock(fenced_buf->mutex);

   if(!vl) {
      /* Invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto finish;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto finish;
   }

   /* Do not validate if buffer is still mapped */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* TODO: wait for the thread that mapped the buffer to unmap it */
      ret = PIPE_ERROR_RETRY;
      goto finish;
   }

   /* Final sanity checking */
   assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
   assert(!fenced_buf->mapcount);

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto finish;
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if(ret != PIPE_OK)
      goto finish;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

finish:
   pipe_mutex_unlock(fenced_buf->mutex);

   return ret;
}

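/**
 * Attach a new fence to the buffer.
 *
 * Moves the buffer to the delayed list and holds an extra reference on it
 * until the fence expires; a NULL fence drops any pending fence and merely
 * clears the validation state.
 */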
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf;
   struct fenced_buffer_list *fenced_list;
   struct pb_fence_ops *ops;

   fenced_buf = fenced_buffer(buf);
   fenced_list = fenced_buf->list;
   ops = fenced_list->ops;

   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         /* Drop the old fence and the reference held for it */
         fenced_buffer_remove_locked(fenced_list, fenced_buf);
         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         p_atomic_inc(&fenced_buf->base.base.reference.count);
         fenced_buffer_add_locked(fenced_list, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_list->mutex);
}

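/**
 * Return the storage buffer and offset underlying this fenced buffer.
 */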
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   /* NOTE: accesses immutable members only -- mutex not necessary */
   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}

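/**
 * pb_buffer virtual function table for fenced buffers.
 */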
static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};

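/**
 * Wrap a buffer in a fenced buffer.
 *
 * Takes ownership of the incoming buffer reference, which is released again
 * if the wrapper cannot be allocated.
 */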
struct pb_buffer *
fenced_buffer_create(struct fenced_buffer_list *fenced_list,
                     struct pb_buffer *buffer)
{
   struct fenced_buffer *buf;

   if(!buffer)
      return NULL;

   buf = CALLOC_STRUCT(fenced_buffer);
   if(!buf) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = buffer->base.alignment;
   buf->base.base.usage = buffer->base.usage;
   buf->base.base.size = buffer->base.size;

   buf->base.vtbl = &fenced_buffer_vtbl;
   buf->buffer = buffer;
   buf->list = fenced_list;

   pipe_mutex_init(buf->mutex);

   pipe_mutex_lock(fenced_list->mutex);
   LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);

   return &buf->base;
}

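/**
 * Create a fenced buffer list.
 *
 * The list takes ownership of the fence ops, which are destroyed along with
 * the list.
 */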
struct fenced_buffer_list *
fenced_buffer_list_create(struct pb_fence_ops *ops)
{
   struct fenced_buffer_list *fenced_list;

   fenced_list = CALLOC_STRUCT(fenced_buffer_list);
   if(!fenced_list)
      return NULL;

   fenced_list->ops = ops;

   LIST_INITHEAD(&fenced_list->delayed);
   fenced_list->numDelayed = 0;

   LIST_INITHEAD(&fenced_list->unfenced);
   fenced_list->numUnfenced = 0;

   pipe_mutex_init(fenced_list->mutex);

   return fenced_list;
}

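/**
 * Release as many delayed buffers as possible; if wait is non-zero, block on
 * each fence instead of merely polling it.
 */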
void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                              int wait)
{
   pipe_mutex_lock(fenced_list->mutex);
   fenced_buffer_list_check_free_locked(fenced_list, wait);
   pipe_mutex_unlock(fenced_list->mutex);
}

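/**
 * Debugging helper: print every unfenced and delayed buffer together with its
 * size, reference count and fence status.
 */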
void
fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   pipe_mutex_lock(fenced_list->mutex);

   debug_printf("%10s %7s %7s %10s %s\n",
                "buffer", "size", "refcount", "fence", "signalled");

   curr = fenced_list->unfenced.next;
   next = curr->next;
   while(curr != &fenced_list->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      pipe_mutex_lock(fenced_buf->mutex);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %7u\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count));
      pipe_mutex_unlock(fenced_buf->mutex);
      curr = next;
      next = curr->next;
   }

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      pipe_mutex_lock(fenced_buf->mutex);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %7u %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      pipe_mutex_unlock(fenced_buf->mutex);
      curr = next;
      next = curr->next;
   }

   pipe_mutex_unlock(fenced_list->mutex);
}

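/**
 * Destroy a fenced buffer list, waiting for all outstanding fences to expire
 * first.
 */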
void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
   pipe_mutex_lock(fenced_list->mutex);

   /* Wait on outstanding fences */
   while (fenced_list->numDelayed) {
      pipe_mutex_unlock(fenced_list->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_list->mutex);
      fenced_buffer_list_check_free_locked(fenced_list, 1);
   }

   /*assert(!fenced_list->numUnfenced);*/

   pipe_mutex_unlock(fenced_list->mutex);
   pipe_mutex_destroy(fenced_list->mutex);

   fenced_list->ops->destroy(fenced_list->ops);

   FREE(fenced_list);
}