gallium/util: Add flush/map debug utility code
[mesa.git] / src / gallium / auxiliary / util / u_debug_flush.c
1 /**************************************************************************
2 *
3 * Copyright 2012 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 * u_debug_flush.c Debug flush and map-related issues:
31 * - Flush while synchronously mapped.
32 * - Command stream reference while synchronously mapped.
33 * - Synchronous map while referenced on command stream.
34 * - Recursive maps.
35 * - Unmap while not mapped.
36 *
37 * @author Thomas Hellstrom <thellstrom@vmware.com>
38 */
39
40 #ifdef DEBUG
41 #include "pipe/p_compiler.h"
42 #include "util/u_debug_stack.h"
43 #include "util/u_debug.h"
44 #include "util/u_memory.h"
45 #include "util/u_debug_flush.h"
46 #include "util/u_hash_table.h"
47 #include "util/u_double_list.h"
48 #include "util/u_inlines.h"
49 #include "os/os_thread.h"
50 #include <stdio.h>
51
/**
 * Per-buffer debug-flush bookkeeping, attached to a pipe buffer.
 * Shared between contexts/threads; locking rules are annotated per group.
 */
struct debug_flush_buf {
   /* Atomic */
   struct pipe_reference reference; /* Must be the first member. */
   pipe_mutex mutex;
   /* Immutable */
   boolean supports_unsync;  /* Buffer can legally be mapped unsynchronized. */
   unsigned bt_depth;        /* Number of backtrace frames captured on map. */
   /* Protected by mutex */
   boolean mapped;           /* Buffer is currently mapped. */
   boolean mapped_sync;      /* Buffer is currently *synchronously* mapped. */
   struct debug_stack_frame *map_frame; /* Backtrace of the most recent map. */
};
64
/**
 * Per-context record of a buffer referenced by the current command batch.
 * Lives in debug_flush_ctx::ref_hash, keyed by the debug_flush_buf pointer.
 */
struct debug_flush_item {
   struct debug_flush_buf *fbuf; /* Holds a counted reference on the buffer. */
   unsigned bt_depth;            /* Backtrace depth used for ref_frame. */
   struct debug_stack_frame *ref_frame; /* Backtrace of the first reference. */
};
70
/**
 * Per-context debug-flush state.  Not internally locked: contexts are
 * assumed to be used by a single thread at a time.
 */
struct debug_flush_ctx {
   /* Contexts are used by a single thread at a time */
   unsigned bt_depth;                /* Backtrace depth for reference frames. */
   boolean catch_map_of_referenced;  /* Also warn on map of a referenced buf. */
   struct util_hash_table *ref_hash; /* fbuf pointer -> debug_flush_item. */
   struct list_head head;            /* Link in the global ctx_list. */
};
78
/* Global list of all live debug_flush contexts, guarded by list_mutex.
 * Statically initialized to an empty (self-linked) list. */
pipe_static_mutex(list_mutex);
static struct list_head ctx_list = {&ctx_list, &ctx_list};
81
82 static struct debug_stack_frame *
83 debug_flush_capture_frame(int start, int depth)
84 {
85 struct debug_stack_frame *frames;
86
87 frames = CALLOC(depth, sizeof(*frames));
88 if (!frames)
89 return NULL;
90
91 debug_backtrace_capture(frames, start, depth);
92 return frames;
93 }
94
/**
 * Hash-table key comparison: pointer identity.
 * Returns 0 when the keys are the same pointer, 1 otherwise.
 */
static int
debug_flush_pointer_compare(void *key1, void *key2)
{
   if (key1 == key2)
      return 0;

   return 1;
}
100
/**
 * Hash-table hash function: use the pointer bits themselves as the hash
 * (truncated to unsigned on platforms where pointers are wider).
 */
static unsigned
debug_flush_pointer_hash(void *key)
{
   unsigned long bits = (unsigned long) key;

   return (unsigned) bits;
}
106
107 struct debug_flush_buf *
108 debug_flush_buf_create(boolean supports_unsync, unsigned bt_depth)
109 {
110 struct debug_flush_buf *fbuf = CALLOC_STRUCT(debug_flush_buf);
111
112 if (!fbuf)
113 goto out_no_buf;
114
115 fbuf->supports_unsync = supports_unsync;
116 fbuf->bt_depth = bt_depth;
117 pipe_reference_init(&fbuf->reference, 1);
118 pipe_mutex_init(fbuf->mutex);
119
120 return fbuf;
121 out_no_buf:
122 debug_printf("Debug flush buffer creation failed.\n");
123 debug_printf("Debug flush checking for this buffer will be incomplete.\n");
124 return NULL;
125 }
126
/**
 * Reference-counted pointer update: set *dst to src, releasing the
 * reference previously held through *dst and destroying that buffer when
 * its count drops to zero.  Pass src == NULL to just release.
 *
 * NOTE: this relies on "reference" being the FIRST member of
 * struct debug_flush_buf, so that &(*dst)->reference / &src->reference
 * behave as NULL when the buffer pointer itself is NULL.
 */
void
debug_flush_buf_reference(struct debug_flush_buf **dst,
                          struct debug_flush_buf *src)
{
   struct debug_flush_buf *fbuf = *dst;

   /* pipe_reference returns TRUE when the old object must be destroyed. */
   if (pipe_reference(&(*dst)->reference, &src->reference)) {
      if (fbuf->map_frame)
         FREE(fbuf->map_frame);

      FREE(fbuf);
   }

   *dst = src;
}
142
143 static void
144 debug_flush_item_destroy(struct debug_flush_item *item)
145 {
146 debug_flush_buf_reference(&item->fbuf, NULL);
147
148 if (item->ref_frame)
149 FREE(item->ref_frame);
150
151 FREE(item);
152 }
153
154 struct debug_flush_ctx *
155 debug_flush_ctx_create(boolean catch_reference_of_mapped, unsigned bt_depth)
156 {
157 struct debug_flush_ctx *fctx = CALLOC_STRUCT(debug_flush_ctx);
158
159 if (!fctx)
160 goto out_no_ctx;
161
162 fctx->ref_hash = util_hash_table_create(debug_flush_pointer_hash,
163 debug_flush_pointer_compare);
164
165 if (!fctx->ref_hash)
166 goto out_no_ref_hash;
167
168 fctx->bt_depth = bt_depth;
169 pipe_mutex_lock(list_mutex);
170 list_addtail(&fctx->head, &ctx_list);
171 pipe_mutex_unlock(list_mutex);
172
173 return fctx;
174
175 out_no_ref_hash:
176 FREE(fctx);
177 out_no_ctx:
178 debug_printf("Debug flush context creation failed.\n");
179 debug_printf("Debug flush checking for this context will be incomplete.\n");
180 return NULL;
181 }
182
183 static void
184 debug_flush_alert(const char *s, const char *op,
185 unsigned start, unsigned depth,
186 boolean continued,
187 boolean capture,
188 const struct debug_stack_frame *frame)
189 {
190 if (capture)
191 frame = debug_flush_capture_frame(start, depth);
192
193 if (s)
194 debug_printf("%s ", s);
195 if (frame) {
196 debug_printf("%s backtrace follows:\n", op);
197 debug_backtrace_dump(frame, depth);
198 } else
199 debug_printf("No %s backtrace was captured.\n", op);
200
201 if (continued)
202 debug_printf("**********************************\n");
203 else
204 debug_printf("*********END OF MESSAGE***********\n\n\n");
205
206 if (capture)
207 FREE((void *)frame);
208 }
209
210
211 void
212 debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
213 {
214 boolean mapped_sync = FALSE;
215
216 if (!fbuf)
217 return;
218
219 pipe_mutex_lock(fbuf->mutex);
220 if (fbuf->mapped) {
221 debug_flush_alert("Recursive map detected.", "Map",
222 2, fbuf->bt_depth, TRUE, TRUE, NULL);
223 debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, FALSE,
224 FALSE, fbuf->map_frame);
225 } else if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) ||
226 !fbuf->supports_unsync) {
227 fbuf->mapped_sync = mapped_sync = TRUE;
228 }
229 fbuf->map_frame = debug_flush_capture_frame(1, fbuf->bt_depth);
230 fbuf->mapped = TRUE;
231 pipe_mutex_unlock(fbuf->mutex);
232
233 if (mapped_sync) {
234 struct debug_flush_ctx *fctx;
235
236 pipe_mutex_lock(list_mutex);
237 LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
238 struct debug_flush_item *item =
239 util_hash_table_get(fctx->ref_hash, fbuf);
240
241 if (item && fctx->catch_map_of_referenced) {
242 debug_flush_alert("Already referenced map detected.",
243 "Map", 2, fbuf->bt_depth, TRUE, TRUE, NULL);
244 debug_flush_alert(NULL, "Reference", 0, item->bt_depth,
245 FALSE, FALSE, item->ref_frame);
246 }
247 }
248 pipe_mutex_unlock(list_mutex);
249 }
250 }
251
252 void
253 debug_flush_unmap(struct debug_flush_buf *fbuf)
254 {
255 if (!fbuf)
256 return;
257
258 pipe_mutex_lock(fbuf->mutex);
259 if (!fbuf->mapped)
260 debug_flush_alert("Unmap not previously mapped detected.", "Map",
261 2, fbuf->bt_depth, FALSE, TRUE, NULL);
262
263 fbuf->mapped_sync = FALSE;
264 fbuf->mapped = FALSE;
265 if (fbuf->map_frame) {
266 FREE(fbuf->map_frame);
267 fbuf->map_frame = NULL;
268 }
269 pipe_mutex_unlock(fbuf->mutex);
270 }
271
/**
 * Record that the context's current command batch references @fbuf, and
 * warn if the buffer is synchronously mapped at that point.
 *
 * Only the first reference per batch is recorded; later calls for the
 * same buffer still perform the mapped-while-referenced check but add
 * no new hash entry.
 *
 * @param fctx  context tracker (may be NULL; then a no-op)
 * @param fbuf  per-buffer debug state (may be NULL; then a no-op)
 */
void
debug_flush_cb_reference(struct debug_flush_ctx *fctx,
                         struct debug_flush_buf *fbuf)
{
   struct debug_flush_item *item;

   if (!fctx || !fbuf)
      return;

   /* No lock needed: ref_hash is only touched by the context's thread. */
   item = util_hash_table_get(fctx->ref_hash, fbuf);

   pipe_mutex_lock(fbuf->mutex);
   if (fbuf->mapped_sync) {
      debug_flush_alert("Reference of mapped buffer detected.", "Reference",
                        2, fctx->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->map_frame);
   }
   pipe_mutex_unlock(fbuf->mutex);

   if (!item) {
      item = CALLOC_STRUCT(debug_flush_item);
      if (item) {
         /* The item holds its own reference on the buffer. */
         debug_flush_buf_reference(&item->fbuf, fbuf);
         item->bt_depth = fctx->bt_depth;
         item->ref_frame = debug_flush_capture_frame(2, item->bt_depth);
         if (util_hash_table_set(fctx->ref_hash, fbuf, item) != PIPE_OK) {
            debug_flush_item_destroy(item);
            goto out_no_item;
         }
         return;
      }
      goto out_no_item;
   }
   return;

out_no_item:
   /* Allocation or insertion failed: checking degrades for this batch. */
   debug_printf("Debug flush command buffer reference creation failed.\n");
   debug_printf("Debug flush checking will be incomplete "
                "for this command batch.\n");
}
313
314 static enum pipe_error
315 debug_flush_might_flush_cb(void *key, void *value, void *data)
316 {
317 struct debug_flush_item *item =
318 (struct debug_flush_item *) value;
319 struct debug_flush_buf *fbuf = item->fbuf;
320 const char *reason = (const char *) data;
321 char message[80];
322
323 snprintf(message, sizeof(message),
324 "%s referenced mapped buffer detected.", reason);
325
326 pipe_mutex_lock(fbuf->mutex);
327 if (fbuf->mapped_sync) {
328 debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
329 debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
330 fbuf->map_frame);
331 debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
332 FALSE, item->ref_frame);
333 }
334 pipe_mutex_unlock(fbuf->mutex);
335
336 return PIPE_OK;
337 }
338
339 void
340 debug_flush_might_flush(struct debug_flush_ctx *fctx)
341 {
342 if (!fctx)
343 return;
344
345 util_hash_table_foreach(fctx->ref_hash,
346 debug_flush_might_flush_cb,
347 "Might flush");
348 }
349
350 static enum pipe_error
351 debug_flush_flush_cb(void *key, void *value, void *data)
352 {
353 struct debug_flush_item *item =
354 (struct debug_flush_item *) value;
355
356 debug_flush_item_destroy(item);
357
358 return PIPE_OK;
359 }
360
361
362 void
363 debug_flush_flush(struct debug_flush_ctx *fctx)
364 {
365 if (!fctx)
366 return;
367
368 util_hash_table_foreach(fctx->ref_hash,
369 debug_flush_might_flush_cb,
370 "Flush");
371 util_hash_table_foreach(fctx->ref_hash,
372 debug_flush_flush_cb,
373 NULL);
374 util_hash_table_clear(fctx->ref_hash);
375 }
376
377 void
378 debug_flush_ctx_destroy(struct debug_flush_ctx *fctx)
379 {
380 if (!fctx)
381 return;
382
383 list_del(&fctx->head);
384 util_hash_table_foreach(fctx->ref_hash,
385 debug_flush_flush_cb,
386 NULL);
387 util_hash_table_clear(fctx->ref_hash);
388 util_hash_table_destroy(fctx->ref_hash);
389 FREE(fctx);
390 }
391 #endif