pipebuffer: Dump debugging info for fenced buffer list.
src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c

/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
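
/*
 * A fenced buffer is a wrapper around an ordinary pb_buffer that associates
 * it with a fence.  When the wrapper's reference count drops to zero while
 * its fence is still pending, the underlying buffer is not released
 * immediately; it is kept on a per-list "delayed" queue and freed once the
 * fence signals, i.e. once the GPU has finished using it.
 *
 * A minimal usage sketch follows.  The winsys, the underlying buffer and
 * the fence handle are assumed to come from the surrounding driver code:
 *
 * \code
 *    struct fenced_buffer_list *list = fenced_buffer_list_create(winsys);
 *    struct pb_buffer *buf = fenced_buffer_create(list, underlying_buf);
 *
 *    // ... emit GPU commands referencing buf, flush, obtain a fence ...
 *    buffer_fence(buf, fence);
 *
 *    pb_reference(&buf, NULL);                 // destruction is delayed
 *    fenced_buffer_list_check_free(list, 0);   // reclaim signalled buffers
 *    fenced_buffer_list_destroy(list);         // waits for pending fences
 * \endcode
 */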

#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_error.h"
#include "pipe/p_debug.h"
#include "pipe/p_winsys.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)

#define PIPE_BUFFER_USAGE_CPU_READ_WRITE \
   ( PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE )
#define PIPE_BUFFER_USAGE_GPU_READ_WRITE \
   ( PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE )
#define PIPE_BUFFER_USAGE_WRITE \
   ( PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_GPU_WRITE )


struct fenced_buffer_list
{
   pipe_mutex mutex;

   struct pipe_winsys *winsys;

   /** Buffers with a pending fence, in fence order (oldest at the head) */
   size_t numDelayed;
   struct list_head delayed;

#ifdef DEBUG
   /** Buffers that are alive but have no fence attached (debug only) */
   size_t numUnfenced;
   struct list_head unfenced;
#endif
};


/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;

   /* FIXME: protect access with mutex */

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;
   struct pipe_fence_handle *fence;

   struct list_head head;
   struct fenced_buffer_list *list;
};


/* Forward declaration: the vtbl is defined at the bottom of this file but
 * referenced by the cast helper below. */
extern const struct pb_vtbl fenced_buffer_vtbl;


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   assert(buf->vtbl == &fenced_buffer_vtbl);
   return (struct fenced_buffer *)buf;
}


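/**
 * Append a newly fenced buffer to the tail of the delayed-destruction list
 * (removing it from the unfenced list in debug builds).
 *
 * The buffer must have a fence attached, and the caller must hold the list
 * mutex.
 */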
static INLINE void
_fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(fenced_buf->base.base.refcount);
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

#ifdef DEBUG
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#endif
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}


/**
 * Actually destroy the buffer.
 */
static INLINE void
_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(!fenced_buf->base.base.refcount);
   assert(!fenced_buf->fence);
#ifdef DEBUG
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#else
   (void)fenced_list;
#endif
   pb_reference(&fenced_buf->buffer, NULL);
   FREE(fenced_buf);
}


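/**
 * Take a buffer off the delayed list: release its fence, clear its GPU
 * usage flags, and destroy it if no references to it remain.
 *
 * The caller must hold the list mutex.
 */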
static INLINE void
_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
                      struct fenced_buffer *fenced_buf)
{
   struct pipe_winsys *winsys = fenced_list->winsys;

   assert(fenced_buf->fence);
   assert(fenced_buf->list == fenced_list);

   winsys->fence_reference(winsys, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;

#ifdef DEBUG
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
#endif

   if(!fenced_buf->base.base.refcount)
      _fenced_buffer_destroy(fenced_buf);
}


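/**
 * Wait for the buffer's fence to signal, then take the buffer off the
 * delayed list and clear its GPU usage flags.
 *
 * The caller must hold a reference to the buffer, since
 * _fenced_buffer_remove() would otherwise destroy it.
 */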
static INLINE enum pipe_error
_fenced_buffer_finish(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pipe_winsys *winsys = fenced_list->winsys;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(fenced_buf->fence);
   if(fenced_buf->fence) {
      if(winsys->fence_finish(winsys, fenced_buf->fence, 0) != 0) {
         return PIPE_ERROR;
      }
      /* Remove from the fenced list */
      /* TODO: also remove subsequent buffers that share the same fence */
      _fenced_buffer_remove(fenced_list, fenced_buf);
   }

   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
   return PIPE_OK;
}


/**
 * Free as many fenced buffers from the head of the list as possible.
 *
 * If \a wait is true, block until each fence signals; otherwise stop at the
 * first fence that has not signalled yet.
 */
static void
_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                               int wait)
{
   struct pipe_winsys *winsys = fenced_list->winsys;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;
         if (wait)
            signaled = winsys->fence_finish(winsys, fenced_buf->fence, 0);
         else
            signaled = winsys->fence_signalled(winsys, fenced_buf->fence, 0);
         if (signaled != 0)
            break;
         prev_fence = fenced_buf->fence;
      }
      else {
         /* Same fence object as the previous buffer, which we just saw
          * signal, so there is no need to ask the winsys again. */
         assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
      }

      _fenced_buffer_remove(fenced_list, fenced_buf);

      curr = next;
      next = curr->next;
   }
}


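/**
 * pb_vtbl::destroy implementation, called when the reference count drops to
 * zero.  If the buffer's fence has already signalled, the buffer is
 * destroyed immediately, along with the older buffers preceding it on the
 * delayed list; otherwise destruction is delayed until the fence signals.
 */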
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   pipe_mutex_lock(fenced_list->mutex);
   assert(fenced_buf->base.base.refcount == 0);
   if (fenced_buf->fence) {
      struct pipe_winsys *winsys = fenced_list->winsys;
      if(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0) {
         struct list_head *curr, *prev;
         curr = &fenced_buf->head;
         prev = curr->prev;
         /* Buffers are kept in fence order, so everything preceding this
          * one on the delayed list should have signalled as well (the
          * assert below checks this); free them all. */
         do {
            fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
            assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
            _fenced_buffer_remove(fenced_list, fenced_buf);
            curr = prev;
            prev = curr->prev;
         } while (curr != &fenced_list->delayed);
      }
      else {
         /* delay destruction */
      }
   }
   else {
      _fenced_buffer_destroy(fenced_buf);
   }
   pipe_mutex_unlock(fenced_list->mutex);
}


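/**
 * pb_vtbl::map implementation.  Blocks until any pending GPU write to the
 * buffer has completed before mapping it for CPU access.  Only the CPU
 * usage flags are honoured here.
 */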
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   void *map;

   assert(!(flags & ~PIPE_BUFFER_USAGE_CPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

   /* Check for a pending GPU write */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) {
      /* Wait for the GPU to finish writing */
      if(_fenced_buffer_finish(fenced_buf) != PIPE_OK)
         return NULL;
   }

#if 0
   /* Check for a concurrent CPU write (concurrent reads are OK) */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_WRITE) {
      /* this is legal -- just for debugging */
      debug_warning("concurrent CPU writes");
   }
#endif

   map = pb_map(fenced_buf->buffer, flags);
   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags;
   }

   return map;
}


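/**
 * pb_vtbl::unmap implementation.  Decrements the map count and clears the
 * CPU usage flags once it reaches zero.
 */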
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }
}


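/**
 * pb_vtbl::get_base_buffer implementation: forward the query to the
 * underlying buffer.
 */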
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              unsigned *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}


const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_get_base_buffer
};


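/**
 * Wrap \a buffer in a fenced buffer.  On success, ownership of \a buffer is
 * transferred to the new wrapper; on failure, the reference to \a buffer is
 * released and NULL is returned.
 */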
struct pb_buffer *
fenced_buffer_create(struct fenced_buffer_list *fenced_list,
                     struct pb_buffer *buffer)
{
   struct fenced_buffer *buf;

   if(!buffer)
      return NULL;

   buf = CALLOC_STRUCT(fenced_buffer);
   if(!buf) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   buf->base.base.refcount = 1;
   buf->base.base.alignment = buffer->base.alignment;
   buf->base.base.usage = buffer->base.usage;
   buf->base.base.size = buffer->base.size;

   buf->base.vtbl = &fenced_buffer_vtbl;
   buf->buffer = buffer;
   buf->list = fenced_list;

#ifdef DEBUG
   pipe_mutex_lock(fenced_list->mutex);
   LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);
#endif

   return &buf->base;
}


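/**
 * Associate \a fence with \a buf, releasing any previously attached fence.
 *
 * Attaching the fence that is already attached, or a NULL fence, is a
 * cheap no-op; see the comment in the body for why this matters.
 */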
void
buffer_fence(struct pb_buffer *buf,
             struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf;
   struct fenced_buffer_list *fenced_list;
   struct pipe_winsys *winsys;
   /* FIXME: receive this as a parameter */
   unsigned flags = fence ? PIPE_BUFFER_USAGE_GPU_READ_WRITE : 0;

   /* This is a public function, so be extra cautious with the buffer passed,
    * as it is common to receive null buffers, or pointers to buffers other
    * than fenced buffers. */
   assert(buf);
   if(!buf)
      return;
   assert(buf->vtbl == &fenced_buffer_vtbl);
   if(buf->vtbl != &fenced_buffer_vtbl)
      return;

   fenced_buf = fenced_buffer(buf);
   fenced_list = fenced_buf->list;
   winsys = fenced_list->winsys;

   if(!fence || fence == fenced_buf->fence) {
      /* Handle the same fence case specially, not only because it is a fast
       * path, but mostly to avoid serializing two writes with the same fence,
       * as that would bring the hardware down to synchronous operation without
       * any benefit.
       */
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
      return;
   }

   pipe_mutex_lock(fenced_list->mutex);
   if (fenced_buf->fence)
      _fenced_buffer_remove(fenced_list, fenced_buf);
   if (fence) {
      winsys->fence_reference(winsys, &fenced_buf->fence, fence);
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
      _fenced_buffer_add(fenced_buf);
   }
   pipe_mutex_unlock(fenced_list->mutex);
}


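/**
 * Create an empty fenced buffer list tied to the given winsys.
 */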
struct fenced_buffer_list *
fenced_buffer_list_create(struct pipe_winsys *winsys)
{
   struct fenced_buffer_list *fenced_list;

   fenced_list = CALLOC_STRUCT(fenced_buffer_list);
   if (!fenced_list)
      return NULL;

   fenced_list->winsys = winsys;

   LIST_INITHEAD(&fenced_list->delayed);
   fenced_list->numDelayed = 0;

#ifdef DEBUG
   LIST_INITHEAD(&fenced_list->unfenced);
   fenced_list->numUnfenced = 0;
#endif

   pipe_mutex_init(fenced_list->mutex);

   return fenced_list;
}


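/**
 * Mutex-holding wrapper around _fenced_buffer_list_check_free(): free the
 * buffers at the head of the list whose fences have signalled, optionally
 * waiting for them to signal.
 */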
void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                              int wait)
{
   pipe_mutex_lock(fenced_list->mutex);
   _fenced_buffer_list_check_free(fenced_list, wait);
   pipe_mutex_unlock(fenced_list->mutex);
}


#ifdef DEBUG
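/**
 * Dump the state of every buffer on the unfenced and delayed lists to the
 * debug output: buffer address, reference count, fence, and whether that
 * fence has signalled yet.  Debug builds only.
 */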
void
fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
{
   struct pipe_winsys *winsys = fenced_list->winsys;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   pipe_mutex_lock(fenced_list->mutex);

   debug_printf("%10s %7s %10s %s\n",
                "buffer", "refcount", "fence", "signalled");

   curr = fenced_list->unfenced.next;
   next = curr->next;
   while(curr != &fenced_list->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u\n",
                   fenced_buf,
                   fenced_buf->base.base.refcount);
      curr = next;
      next = curr->next;
   }

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      signaled = winsys->fence_signalled(winsys, fenced_buf->fence, 0);
      debug_printf("%10p %7u %10p %s\n",
                   fenced_buf,
                   fenced_buf->base.base.refcount,
                   fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }

   pipe_mutex_unlock(fenced_list->mutex);
}
#endif


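/**
 * Destroy a fenced buffer list, blocking until all outstanding fences have
 * signalled and all delayed buffers have been freed.
 */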
void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
   pipe_mutex_lock(fenced_list->mutex);

   /* Wait on outstanding fences */
   while (fenced_list->numDelayed) {
      pipe_mutex_unlock(fenced_list->mutex);
#if defined(PIPE_OS_LINUX)
      sched_yield();
#endif
      _fenced_buffer_list_check_free(fenced_list, 1);
      pipe_mutex_lock(fenced_list->mutex);
   }

#ifdef DEBUG
   /* Unfenced buffers may still be alive at this point, so we cannot
    * assert(!fenced_list->numUnfenced) here. */
#endif

   pipe_mutex_unlock(fenced_list->mutex);

   FREE(fenced_list);
}