/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
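
/*
 * A minimal usage sketch (illustrative only, not part of this file).  It
 * assumes a hypothetical driver hook my_screen_create_fence_ops() returning
 * the driver's struct pb_fence_ops; everything else below is the API
 * implemented here or in pb_buffer.h:
 *
 *    struct pb_fence_ops *ops = my_screen_create_fence_ops(screen);
 *    struct fenced_buffer_list *list = fenced_buffer_list_create(ops);
 *
 *    // Wrap an ordinary pb_buffer; the wrapper takes over the reference.
 *    struct pb_buffer *buf = fenced_buffer_create(list, raw_buf);
 *
 *    // CPU access: the map call stalls (or fails with DONTBLOCK) while the
 *    // GPU still owns the buffer.
 *    void *ptr = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
 *    // ... write vertex data ...
 *    pb_unmap(buf);
 *
 *    // At command submission: validate, emit commands, then attach a fence.
 *    pb_validate(buf, vl, PIPE_BUFFER_USAGE_GPU_READ);
 *    pb_fence(buf, fence);
 *
 *    // Periodically reclaim buffers whose fences have expired.
 *    fenced_buffer_list_check_free(list, 0);
 *
 *    fenced_buffer_list_destroy(list);
 */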
#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"

/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)

struct fenced_buffer_list
{
   pipe_mutex mutex;

   struct pb_fence_ops *ops;

   /* Buffers with a pending fence, in fence order (oldest first). */
   size_t numDelayed;
   struct list_head delayed;

   /* Buffers not (or no longer) referenced by the GPU. */
   size_t numUnfenced;
   struct list_head unfenced;
};

/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;

   /* FIXME: protect access with mutex */

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;

   struct list_head head;
   struct fenced_buffer_list *list;
};
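
/*
 * Note on buffer lifetime (summarizing the code below): a fenced_buffer
 * starts out on its list's "unfenced" list.  When a fence is attached
 * (_fenced_buffer_add) it moves to the "delayed" list, and once that fence
 * expires (_fenced_buffer_remove) it moves back to "unfenced"; a buffer
 * whose last reference is dropped while still fenced is only destroyed
 * after its fence has been removed.
 */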

static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   return (struct fenced_buffer *)buf;
}

/**
 * Add the buffer to the fenced (delayed) list.
 *
 * The reference count should be incremented before calling this function.
 */
static INLINE void
_fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   /* Move from the unfenced list to the tail of the delayed (fenced) list. */
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}

/**
 * Actually destroy the buffer.
 */
static INLINE void
_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(!fenced_buf->fence);

   /* Take it off whichever list it is currently on. */
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;

   /* Drop the reference to the underlying buffer and free the wrapper. */
   pb_reference(&fenced_buf->buffer, NULL);
   FREE(fenced_buf);
}

/**
 * Remove the buffer from the fenced (delayed) list, and destroy it if its
 * reference count has already reached zero.
 */
static INLINE void
_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
                      struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->list == fenced_list);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   /* Move from the delayed list back to the unfenced list. */
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;

   if(!pipe_is_referenced(&fenced_buf->base.base.reference))
      _fenced_buffer_destroy(fenced_buf);
}

/**
 * Wait for the buffer's fence to expire, and remove the buffer from the
 * fenced list.
 */
static INLINE enum pipe_error
_fenced_buffer_finish(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pb_fence_ops *ops = fenced_list->ops;

   debug_warning("waiting for GPU");

   assert(fenced_buf->fence);
   if(fenced_buf->fence) {
      if(ops->fence_finish(ops, fenced_buf->fence, 0) != 0) {
         return PIPE_ERROR;
      }
      /* Remove from the fenced list */
      /* TODO: remove consequents */
      _fenced_buffer_remove(fenced_list, fenced_buf);
   }

   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
   return PIPE_OK;
}

/**
 * Free as many fenced buffers from the list head as possible.
 *
 * Buffers sharing the same fence expire together, so each fence only needs
 * to be queried once per group; the walk stops at the first fence that has
 * not expired yet (unless wait is set, in which case we block on it).
 */
static void
_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                               int wait)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;
         if(wait)
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
         else
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         if(signaled != 0)
            break;
         prev_fence = fenced_buf->fence;
      }
      else {
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      _fenced_buffer_remove(fenced_list, fenced_buf);

      curr = next;
      next = curr->next;
   }
}

static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   pipe_mutex_lock(fenced_list->mutex);
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
   if (fenced_buf->fence) {
      struct pb_fence_ops *ops = fenced_list->ops;
      if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
         /* The fence has expired: remove this buffer and every older buffer
          * still sitting on the delayed list. */
         struct list_head *curr, *prev;
         curr = &fenced_buf->head;
         prev = curr->prev;
         do {
            fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
            assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
            _fenced_buffer_remove(fenced_list, fenced_buf);
            curr = prev;
            prev = curr->prev;
         } while (curr != &fenced_list->delayed);
      }
      else {
         /* delay destruction */
      }
   }
   else {
      _fenced_buffer_destroy(fenced_buf);
   }
   pipe_mutex_unlock(fenced_list->mutex);
}

static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pb_fence_ops *ops = fenced_list->ops;
   void *map = NULL;

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /* Serialize writes */
   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
      if(flags & PIPE_BUFFER_USAGE_DONTBLOCK) {
         /* Don't wait for the GPU to finish writing */
         if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0)
            _fenced_buffer_remove(fenced_list, fenced_buf);
         else
            return NULL;
      }
      else {
         /* Wait for the GPU to finish writing */
         _fenced_buffer_finish(fenced_buf);
      }
   }

   /* Check for CPU write access (read is OK) */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* this is legal -- just for debugging */
      debug_warning("concurrent CPU writes");
   }

   map = pb_map(fenced_buf->buffer, flags);
   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   return map;
}

static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }
}

static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   enum pipe_error ret;

   if(!vl) {
      /* Invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      return PIPE_OK;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl)
      return PIPE_ERROR_RETRY;

   /* Do not validate if buffer is still mapped */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* TODO: wait for the thread that mapped the buffer to unmap it */
      return PIPE_ERROR_RETRY;
   }

   /* Final sanity checking */
   assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
   assert(!fenced_buf->mapcount);

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      return PIPE_OK;
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if(ret != PIPE_OK)
      return ret;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

   return PIPE_OK;
}

static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf;
   struct fenced_buffer_list *fenced_list;
   struct pb_fence_ops *ops;

   fenced_buf = fenced_buffer(buf);
   fenced_list = fenced_buf->list;
   ops = fenced_list->ops;

   if(fence == fenced_buf->fence) {
      /* Nothing to do */
      return;
   }

   assert(fenced_buf->vl);
   assert(fenced_buf->validation_flags);

   pipe_mutex_lock(fenced_list->mutex);
   if (fenced_buf->fence)
      _fenced_buffer_remove(fenced_list, fenced_buf);
   if (fence) {
      ops->fence_reference(ops, &fenced_buf->fence, fence);
      fenced_buf->flags |= fenced_buf->validation_flags;
      _fenced_buffer_add(fenced_buf);
   }
   pipe_mutex_unlock(fenced_list->mutex);

   pb_fence(fenced_buf->buffer, fence);

   fenced_buf->vl = NULL;
   fenced_buf->validation_flags = 0;
}

static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}

static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};
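
/*
 * The vtbl above is what makes a fenced buffer usable through the generic
 * pb_buffer interface: pb_map/pb_unmap/pb_validate/pb_fence on the wrapper
 * dispatch to the fenced_buffer_* functions, which add the synchronization
 * and then forward the call to the underlying buffer.
 */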

struct pb_buffer *
fenced_buffer_create(struct fenced_buffer_list *fenced_list,
                     struct pb_buffer *buffer)
{
   struct fenced_buffer *buf;

   if(!buffer)
      return NULL;

   buf = CALLOC_STRUCT(fenced_buffer);
   if(!buf) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = buffer->base.alignment;
   buf->base.base.usage = buffer->base.usage;
   buf->base.base.size = buffer->base.size;

   buf->base.vtbl = &fenced_buffer_vtbl;
   buf->buffer = buffer;
   buf->list = fenced_list;

   pipe_mutex_lock(fenced_list->mutex);
   LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);

   return &buf->base;
}

struct fenced_buffer_list *
fenced_buffer_list_create(struct pb_fence_ops *ops)
{
   struct fenced_buffer_list *fenced_list;

   fenced_list = CALLOC_STRUCT(fenced_buffer_list);
   if (!fenced_list)
      return NULL;

   fenced_list->ops = ops;

   LIST_INITHEAD(&fenced_list->delayed);
   fenced_list->numDelayed = 0;

   LIST_INITHEAD(&fenced_list->unfenced);
   fenced_list->numUnfenced = 0;

   pipe_mutex_init(fenced_list->mutex);

   return fenced_list;
}

void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                              int wait)
{
   pipe_mutex_lock(fenced_list->mutex);
   _fenced_buffer_list_check_free(fenced_list, wait);
   pipe_mutex_unlock(fenced_list->mutex);
}

void
fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   pipe_mutex_lock(fenced_list->mutex);

   debug_printf("%10s %7s %7s %10s %s\n",
                "buffer", "size", "refcount", "fence", "signalled");

   curr = fenced_list->unfenced.next;
   next = curr->next;
   while(curr != &fenced_list->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %7u\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count));
      curr = next;
      next = curr->next;
   }

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %7u %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }

   pipe_mutex_unlock(fenced_list->mutex);
}

void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
   pipe_mutex_lock(fenced_list->mutex);

   /* Wait on outstanding fences */
   while (fenced_list->numDelayed) {
      pipe_mutex_unlock(fenced_list->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      _fenced_buffer_list_check_free(fenced_list, 1);
      pipe_mutex_lock(fenced_list->mutex);
   }

   /*assert(!fenced_list->numUnfenced);*/

   pipe_mutex_unlock(fenced_list->mutex);

   fenced_list->ops->destroy(fenced_list->ops);

   FREE(fenced_list);
}