/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * Implementation of fenced buffers.
 *
 * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX)
#include <sched.h>   /* sched_yield() */
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_error.h"
#include "pipe/p_debug.h"
#include "pipe/p_winsys.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)

#define PIPE_BUFFER_USAGE_CPU_READ_WRITE \
   ( PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE )
#define PIPE_BUFFER_USAGE_GPU_READ_WRITE \
   ( PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE )
#define PIPE_BUFFER_USAGE_WRITE \
   ( PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_GPU_WRITE )
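/**
 * List of buffers which are awaiting fence signalling before they can be
 * safely destroyed or reused.
 */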
struct fenced_buffer_list
{
   pipe_mutex mutex;

   struct pipe_winsys *winsys;

   size_t numDelayed;

   struct list_head delayed;
};
/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;

   /* FIXME: protect access with mutex */

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pipe_fence_handle *fence;

   struct list_head head;
   struct fenced_buffer_list *list;
};
/* Tentative definition, so the cast helper below can reference the vtbl
 * before its initializer appears further down. */
static const struct pb_vtbl fenced_buffer_vtbl;


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf->vtbl == &fenced_buffer_vtbl);
   return (struct fenced_buffer *)buf;
}
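/**
 * Append the buffer to the tail of the delayed-destruction list.
 *
 * The caller is expected to hold fenced_list->mutex.
 */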
static INLINE void
_fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(fenced_buf->base.base.refcount);
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   assert(!fenced_buf->head.prev);
   assert(!fenced_buf->head.next);
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}
/**
 * Actually destroy the buffer.
 */
static INLINE void
_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->base.base.refcount);
   assert(!fenced_buf->fence);
   pb_reference(&fenced_buf->buffer, NULL);
   FREE(fenced_buf);
}
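/**
 * Release the buffer's fence, unlink it from the delayed-destruction list,
 * and destroy the buffer if no references to it remain.
 */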
static INLINE void
_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
                      struct fenced_buffer *fenced_buf)
{
   struct pipe_winsys *winsys = fenced_list->winsys;

   assert(fenced_buf->fence);
   assert(fenced_buf->list == fenced_list);

   winsys->fence_reference(winsys, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   fenced_buf->head.prev = NULL;
   fenced_buf->head.next = NULL;

   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;

   if(!fenced_buf->base.base.refcount)
      _fenced_buffer_destroy(fenced_buf);
}
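/**
 * Wait until the buffer's fence signals, then drop the buffer from the
 * fenced list and clear its GPU usage flags.
 */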
static INLINE enum pipe_error
_fenced_buffer_finish(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pipe_winsys *winsys = fenced_list->winsys;

   debug_warning("waiting for GPU");

   assert(fenced_buf->fence);
   if(fenced_buf->fence) {
      if(winsys->fence_finish(winsys, fenced_buf->fence, 0) != 0) {
         return PIPE_ERROR;
      }
      /* Remove from the fenced list */
      /* TODO: remove consequents */
      _fenced_buffer_remove(fenced_list, fenced_buf);
   }

   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
   return PIPE_OK;
}
/**
 * Free as many fenced buffers from the list head as possible.
 */
static void
_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                               int wait)
{
   struct pipe_winsys *winsys = fenced_list->winsys;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;
         if(wait)
            signaled = winsys->fence_finish(winsys, fenced_buf->fence, 0);
         else
            signaled = winsys->fence_signalled(winsys, fenced_buf->fence, 0);
         if(signaled != 0)
            break;
         prev_fence = fenced_buf->fence;
      }
      else {
         assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
      }

      _fenced_buffer_remove(fenced_list, fenced_buf);

      curr = next;
      next = curr->next;
   }
}
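/**
 * pb_buffer destroy callback.
 *
 * If the buffer is idle it is destroyed immediately.  If its fence has
 * signalled, every buffer queued before it on the delayed list must have
 * signalled as well, so those are freed too.  Otherwise destruction is
 * simply delayed until the fence is reaped later.
 */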
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   pipe_mutex_lock(fenced_list->mutex);
   assert(fenced_buf->base.base.refcount == 0);
   if (fenced_buf->fence) {
      struct pipe_winsys *winsys = fenced_list->winsys;
      if(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0) {
         struct list_head *curr, *prev;
         curr = &fenced_buf->head;
         prev = curr->prev;
         do {
            fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
            assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
            _fenced_buffer_remove(fenced_list, fenced_buf);
            curr = prev;
            prev = curr->prev;
         } while (curr != &fenced_list->delayed);
      }
      else {
         /* delay destruction */
      }
   }
   else {
      _fenced_buffer_destroy(fenced_buf);
   }
   pipe_mutex_unlock(fenced_list->mutex);
}
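/**
 * pb_buffer map callback: stall until any pending GPU write has completed
 * before returning a CPU pointer to the underlying buffer.
 */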
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   void *map;

   assert(!(flags & ~PIPE_BUFFER_USAGE_CPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

   /* Check for GPU read/write access */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) {
      /* Wait for the GPU to finish writing */
      _fenced_buffer_finish(fenced_buf);
   }

   /* Check for CPU write access (read is OK) */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* this is legal -- just for debugging */
      debug_warning("concurrent CPU writes");
   }

   map = pb_map(fenced_buf->buffer, flags);
   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags;
   }

   return map;
}
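/**
 * pb_buffer unmap callback: decrement the map count and clear the CPU usage
 * flags once the last mapping goes away.
 */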
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }
}
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              unsigned *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}
static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_get_base_buffer
};
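/**
 * Wrap a buffer in a fenced buffer.  Ownership of the caller's reference to
 * 'buffer' passes to the wrapper (it is released on failure).
 */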
struct pb_buffer *
fenced_buffer_create(struct fenced_buffer_list *fenced_list,
                     struct pb_buffer *buffer)
{
   struct fenced_buffer *buf;

   if(!buffer)
      return NULL;

   buf = CALLOC_STRUCT(fenced_buffer);
   if(!buf) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   buf->base.base.refcount = 1;
   buf->base.base.alignment = buffer->base.alignment;
   buf->base.base.usage = buffer->base.usage;
   buf->base.base.size = buffer->base.size;

   buf->base.vtbl = &fenced_buffer_vtbl;
   buf->buffer = buffer;
   buf->list = fenced_list;

   return &buf->base;
}
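/**
 * Associate a fence with a buffer, moving it onto the delayed-destruction
 * list.  Passing the fence the buffer already holds, or NULL, only updates
 * the GPU usage flags.
 */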
void
buffer_fence(struct pb_buffer *buf,
             struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf;
   struct fenced_buffer_list *fenced_list;
   struct pipe_winsys *winsys;
   /* FIXME: receive this as a parameter */
   unsigned flags = fence ? PIPE_BUFFER_USAGE_GPU_READ_WRITE : 0;

   /* This is a public function, so be extra cautious with the buffer passed,
    * as it frequently receives null buffers, or pointers to buffers other
    * than fenced buffers. */
   assert(buf);
   if(!buf)
      return;
   assert(buf->vtbl == &fenced_buffer_vtbl);
   if(buf->vtbl != &fenced_buffer_vtbl)
      return;

   fenced_buf = fenced_buffer(buf);
   fenced_list = fenced_buf->list;
   winsys = fenced_list->winsys;

   if(!fence || fence == fenced_buf->fence) {
      /* Handle the same fence case specially, not only because it is a fast
       * path, but mostly to avoid serializing two writes with the same fence,
       * as that would bring the hardware down to synchronous operation without
       * any benefit. */
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
      return;
   }

   pipe_mutex_lock(fenced_list->mutex);
   if (fenced_buf->fence)
      _fenced_buffer_remove(fenced_list, fenced_buf);
   winsys->fence_reference(winsys, &fenced_buf->fence, fence);
   fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
   _fenced_buffer_add(fenced_buf);
   pipe_mutex_unlock(fenced_list->mutex);
}
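/**
 * Create an empty fenced buffer list for the given winsys.
 */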
struct fenced_buffer_list *
fenced_buffer_list_create(struct pipe_winsys *winsys)
{
   struct fenced_buffer_list *fenced_list;

   fenced_list = CALLOC_STRUCT(fenced_buffer_list);
   if(!fenced_list)
      return NULL;

   fenced_list->winsys = winsys;

   LIST_INITHEAD(&fenced_list->delayed);

   fenced_list->numDelayed = 0;

   pipe_mutex_init(fenced_list->mutex);

   return fenced_list;
}
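/**
 * Thread-safe wrapper around _fenced_buffer_list_check_free(): reclaim as
 * many signalled buffers as possible, optionally waiting for their fences.
 */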
void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                              int wait)
{
   pipe_mutex_lock(fenced_list->mutex);
   _fenced_buffer_list_check_free(fenced_list, wait);
   pipe_mutex_unlock(fenced_list->mutex);
}
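/**
 * Wait for all outstanding fences and free the list.
 */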
void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
   pipe_mutex_lock(fenced_list->mutex);

   /* Wait on outstanding fences */
   while (fenced_list->numDelayed) {
      pipe_mutex_unlock(fenced_list->mutex);
#if defined(PIPE_OS_LINUX)
      /* Yield the CPU while busy-waiting for the GPU */
      sched_yield();
#endif
      _fenced_buffer_list_check_free(fenced_list, 1);
      pipe_mutex_lock(fenced_list->mutex);
   }

   pipe_mutex_unlock(fenced_list->mutex);

   FREE(fenced_list);
}