/**************************************************************************
 *
 * Copyright 2012 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * u_debug_flush.c Debug flush and map-related issues:
 * - Flush while synchronously mapped.
 * - Command stream reference while synchronously mapped.
 * - Synchronous map while referenced on command stream.
 * - Recursive maps.
 * - Unmap while not mapped.
 *
 * @author Thomas Hellstrom <thellstrom@vmware.com>
 */
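
/*
 * Minimal usage sketch (illustrative only; the wrapping call sites shown
 * are assumptions about the embedding winsys/driver, not part of this
 * file): each buffer gets a debug_flush_buf, each context a
 * debug_flush_ctx, and the map/unmap, command-stream reference and flush
 * paths call the corresponding hooks below.
 *
 *   struct debug_flush_buf *fbuf = debug_flush_buf_create(FALSE, 8);
 *   struct debug_flush_ctx *fctx = debug_flush_ctx_create(TRUE, 8);
 *
 *   debug_flush_map(fbuf, flags);          // in the buffer-map path
 *   debug_flush_cb_reference(fctx, fbuf);  // buffer referenced by the cs
 *   debug_flush_might_flush(fctx);         // before a possible flush
 *   debug_flush_unmap(fbuf);               // in the buffer-unmap path
 *   debug_flush_flush(fctx);               // when the cs is flushed
 */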

#ifdef DEBUG
#include "pipe/p_compiler.h"
#include "util/u_debug_stack.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "util/list.h"
#include "util/u_inlines.h"
#include "util/u_string.h"
#include "os/os_thread.h"
#include <stdio.h>

/* Future improvement: Use realloc instead? */
/* Maximum number of concurrently outstanding maps tracked per buffer. */
#define DEBUG_FLUSH_MAP_DEPTH 16

struct debug_map_item {
   struct debug_stack_frame *frame; /* Backtrace captured at map time. */
   boolean persistent; /* Map may legally remain across a flush. */
};

struct debug_flush_buf {
   /* Atomic */
   struct pipe_reference reference; /* Must be the first member. */
   mtx_t mutex;
   /* Immutable */
   boolean supports_persistent;
   unsigned bt_depth;
   /* Protected by mutex */
   int map_count;
   boolean has_sync_map;
   int last_sync_map;
   struct debug_map_item maps[DEBUG_FLUSH_MAP_DEPTH];
};

/* One entry per buffer referenced by the command stream being built. */
struct debug_flush_item {
   struct debug_flush_buf *fbuf;
   unsigned bt_depth;
   struct debug_stack_frame *ref_frame;
};

struct debug_flush_ctx {
   /* Contexts are used by a single thread at a time. */
   unsigned bt_depth;
   boolean catch_map_of_referenced;
   struct util_hash_table *ref_hash;
   struct list_head head;
};

static mtx_t list_mutex = _MTX_INITIALIZER_NP;
/* Statically initialized to an empty (self-referencing) list. */
static struct list_head ctx_list = {&ctx_list, &ctx_list};

static struct debug_stack_frame *
debug_flush_capture_frame(int start, int depth)
{
   struct debug_stack_frame *frames;

   frames = CALLOC(depth, sizeof(*frames));
   if (!frames)
      return NULL;

   debug_backtrace_capture(frames, start, depth);
   return frames;
}

static int
debug_flush_pointer_compare(void *key1, void *key2)
{
   return (key1 == key2) ? 0 : 1;
}

static unsigned
debug_flush_pointer_hash(void *key)
{
   /* Truncating the pointer is fine here; it only needs to hash well. */
   return (unsigned) (unsigned long) key;
}

struct debug_flush_buf *
debug_flush_buf_create(boolean supports_persistent, unsigned bt_depth)
{
   struct debug_flush_buf *fbuf = CALLOC_STRUCT(debug_flush_buf);

   if (!fbuf)
      goto out_no_buf;

   fbuf->supports_persistent = supports_persistent;
   fbuf->bt_depth = bt_depth;
   pipe_reference_init(&fbuf->reference, 1);
   (void) mtx_init(&fbuf->mutex, mtx_plain);

   return fbuf;
out_no_buf:
   debug_printf("Debug flush buffer creation failed.\n");
   debug_printf("Debug flush checking for this buffer will be incomplete.\n");
   return NULL;
}
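
/*
 * Lifecycle sketch (illustrative; buf and its fbuf field belong to a
 * hypothetical embedding winsys): one debug_flush_buf shadows each real
 * buffer and is dropped with a final unreference at destruction time.
 *
 *   buf->fbuf = debug_flush_buf_create(FALSE, 8);
 *   ...
 *   debug_flush_buf_reference(&buf->fbuf, NULL);  // final unreference
 */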

void
debug_flush_buf_reference(struct debug_flush_buf **dst,
                          struct debug_flush_buf *src)
{
   struct debug_flush_buf *fbuf = *dst;

   /* Relies on reference being the first member: with a NULL src or
    * *dst, the corresponding pipe_reference pointer is also NULL,
    * which pipe_reference() accepts.
    */
   if (pipe_reference(&(*dst)->reference, &src->reference)) {
      int i;

      for (i = 0; i < fbuf->map_count; ++i) {
         FREE(fbuf->maps[i].frame);
      }
      FREE(fbuf);
   }

   *dst = src;
}

static void
debug_flush_item_destroy(struct debug_flush_item *item)
{
   debug_flush_buf_reference(&item->fbuf, NULL);

   FREE(item->ref_frame);

   FREE(item);
}

struct debug_flush_ctx *
debug_flush_ctx_create(UNUSED boolean catch_reference_of_mapped,
                       unsigned bt_depth)
{
   struct debug_flush_ctx *fctx = CALLOC_STRUCT(debug_flush_ctx);

   if (!fctx)
      goto out_no_ctx;

   fctx->ref_hash = util_hash_table_create(debug_flush_pointer_hash,
                                           debug_flush_pointer_compare);

   if (!fctx->ref_hash)
      goto out_no_ref_hash;

   /* Note: catch_map_of_referenced stays FALSE (from CALLOC); the
    * catch_reference_of_mapped argument is currently unused.
    */
   fctx->bt_depth = bt_depth;
   mtx_lock(&list_mutex);
   list_addtail(&fctx->head, &ctx_list);
   mtx_unlock(&list_mutex);

   return fctx;

out_no_ref_hash:
   FREE(fctx);
out_no_ctx:
   debug_printf("Debug flush context creation failed.\n");
   debug_printf("Debug flush checking for this context will be incomplete.\n");
   return NULL;
}

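/*
 * A composed alert reads roughly as below (backtrace frames elided).
 * "continued" chains several blocks into one message; "capture" makes
 * this function capture (and free) its own backtrace instead of using
 * the caller-supplied frame:
 *
 *   Recursive sync map detected. Map backtrace follows:
 *   <frames>
 *   **********************************
 *   Previous map backtrace follows:
 *   <frames>
 *   *********END OF MESSAGE***********
 */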
static void
debug_flush_alert(const char *s, const char *op,
                  unsigned start, unsigned depth,
                  boolean continued,
                  boolean capture,
                  const struct debug_stack_frame *frame)
{
   if (capture)
      frame = debug_flush_capture_frame(start, depth);

   if (s)
      debug_printf("%s ", s);
   if (frame) {
      debug_printf("%s backtrace follows:\n", op);
      debug_backtrace_dump(frame, depth);
   } else {
      debug_printf("No %s backtrace was captured.\n", op);
   }

   if (continued)
      debug_printf("**********************************\n");
   else
      debug_printf("*********END OF MESSAGE***********\n\n\n");

   if (capture)
      FREE((void *)frame);
}


void
debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
{
   boolean map_sync, persistent;

   if (!fbuf)
      return;

   mtx_lock(&fbuf->mutex);
   map_sync = !(flags & PIPE_TRANSFER_UNSYNCHRONIZED);
   persistent = !map_sync || fbuf->supports_persistent ||
      !!(flags & PIPE_TRANSFER_PERSISTENT);

   /* Recursive maps are allowed if the previous maps are persistent,
    * or if the current map is unsynchronized. Otherwise we might flush
    * while a non-persistent map is outstanding.
    */
   if (fbuf->has_sync_map && map_sync) {
      debug_flush_alert("Recursive sync map detected.", "Map",
                        2, fbuf->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->maps[fbuf->last_sync_map].frame);
   }

   fbuf->maps[fbuf->map_count].frame =
      debug_flush_capture_frame(1, fbuf->bt_depth);
   fbuf->maps[fbuf->map_count].persistent = persistent;
   if (!persistent) {
      fbuf->has_sync_map = TRUE;
      fbuf->last_sync_map = fbuf->map_count;
   }

   fbuf->map_count++;
   assert(fbuf->map_count < DEBUG_FLUSH_MAP_DEPTH);

   mtx_unlock(&fbuf->mutex);

   if (!persistent) {
      struct debug_flush_ctx *fctx;

      mtx_lock(&list_mutex);
      LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
         struct debug_flush_item *item =
            util_hash_table_get(fctx->ref_hash, fbuf);

         if (item && fctx->catch_map_of_referenced) {
            debug_flush_alert("Already referenced map detected.",
                              "Map", 2, fbuf->bt_depth, TRUE, TRUE, NULL);
            debug_flush_alert(NULL, "Reference", 0, item->bt_depth,
                              FALSE, FALSE, item->ref_frame);
         }
      }
      mtx_unlock(&list_mutex);
   }
}

void
debug_flush_unmap(struct debug_flush_buf *fbuf)
{
   if (!fbuf)
      return;

   mtx_lock(&fbuf->mutex);
   if (--fbuf->map_count < 0) {
      debug_flush_alert("Unmap not previously mapped detected.", "Map",
                        2, fbuf->bt_depth, FALSE, TRUE, NULL);
   } else {
      if (fbuf->has_sync_map && fbuf->last_sync_map == fbuf->map_count) {
         int i = fbuf->map_count;

         /* The last sync map was just unmapped. Rescan the remaining
          * maps for the most recent non-persistent one.
          */
         fbuf->has_sync_map = FALSE;
         while (i-- && !fbuf->has_sync_map) {
            if (!fbuf->maps[i].persistent) {
               fbuf->has_sync_map = TRUE;
               fbuf->last_sync_map = i;
            }
         }
      }
      /* Free the captured frame for every successful unmap, not only
       * for sync maps; otherwise persistent-map backtraces would leak.
       */
      FREE(fbuf->maps[fbuf->map_count].frame);
      fbuf->maps[fbuf->map_count].frame = NULL;
   }
   mtx_unlock(&fbuf->mutex);
}


/**
 * Add the given buffer to the list of active buffers. Active buffers
 * are those which are referenced by the command buffer currently being
 * constructed.
 */
void
debug_flush_cb_reference(struct debug_flush_ctx *fctx,
                         struct debug_flush_buf *fbuf)
{
   struct debug_flush_item *item;

   if (!fctx || !fbuf)
      return;

   item = util_hash_table_get(fctx->ref_hash, fbuf);

   mtx_lock(&fbuf->mutex);
   if (fbuf->map_count && fbuf->has_sync_map) {
      debug_flush_alert("Reference of mapped buffer detected.", "Reference",
                        2, fctx->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->maps[fbuf->last_sync_map].frame);
   }
   mtx_unlock(&fbuf->mutex);

   if (!item) {
      item = CALLOC_STRUCT(debug_flush_item);
      if (item) {
         debug_flush_buf_reference(&item->fbuf, fbuf);
         item->bt_depth = fctx->bt_depth;
         item->ref_frame = debug_flush_capture_frame(2, item->bt_depth);
         if (util_hash_table_set(fctx->ref_hash, fbuf, item) != PIPE_OK) {
            debug_flush_item_destroy(item);
            goto out_no_item;
         }
         return;
      }
      goto out_no_item;
   }
   return;

out_no_item:
   debug_printf("Debug flush command buffer reference creation failed.\n");
   debug_printf("Debug flush checking will be incomplete "
                "for this command batch.\n");
}

static enum pipe_error
debug_flush_might_flush_cb(UNUSED void *key, void *value, void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;
   struct debug_flush_buf *fbuf = item->fbuf;

   mtx_lock(&fbuf->mutex);
   if (fbuf->map_count && fbuf->has_sync_map) {
      const char *reason = (const char *) data;
      char message[80];

      util_snprintf(message, sizeof(message),
                    "%s referenced mapped buffer detected.", reason);

      debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
                        fbuf->maps[fbuf->last_sync_map].frame);
      debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
                        FALSE, item->ref_frame);
   }
   mtx_unlock(&fbuf->mutex);

   return PIPE_OK;
}

/**
 * Called when we're about to possibly flush a command buffer.
 * We check if any active buffers are in a mapped state. If so, print an alert.
 */
void
debug_flush_might_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Might flush");
}

static enum pipe_error
debug_flush_flush_cb(UNUSED void *key, void *value, UNUSED void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;

   debug_flush_item_destroy(item);

   return PIPE_OK;
}


/**
 * Called when we flush a command buffer. Two things are done:
 * 1. Check if any of the active buffers are currently mapped (alert if so).
 * 2. Discard/unreference all the active buffers.
 */
void
debug_flush_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Flush");
   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   util_hash_table_clear(fctx->ref_hash);
}

void
debug_flush_ctx_destroy(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   /* ctx_list is shared between contexts; take the same lock as
    * debug_flush_ctx_create() before unlinking.
    */
   mtx_lock(&list_mutex);
   list_del(&fctx->head);
   mtx_unlock(&list_mutex);

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   util_hash_table_clear(fctx->ref_hash);
   util_hash_table_destroy(fctx->ref_hash);
   FREE(fctx);
}
#endif