gallium/util: add some comments in u_debug_flush.c
[mesa.git] / src / gallium / auxiliary / util / u_debug_flush.c
1 /**************************************************************************
2 *
3 * Copyright 2012 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 * u_debug_flush.c Debug flush and map-related issues:
31 * - Flush while synchronously mapped.
32 * - Command stream reference while synchronously mapped.
33 * - Synchronous map while referenced on command stream.
34 * - Recursive maps.
35 * - Unmap while not mapped.
36 *
37 * @author Thomas Hellstrom <thellstrom@vmware.com>
38 */
39
40 #ifdef DEBUG
#include "pipe/p_compiler.h"
#include "util/u_debug_stack.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "util/list.h"
#include "util/u_inlines.h"
#include "util/u_string.h"
#include "os/os_thread.h"

#include <stdint.h>
#include <stdio.h>
52
/**
 * Per-buffer debug-flush bookkeeping. Reference-counted: held both by the
 * buffer itself and by any debug_flush_item tracking a command-stream
 * reference, and freed when the last reference goes away
 * (see debug_flush_buf_reference()).
 */
struct debug_flush_buf {
   /* Atomic */
   struct pipe_reference reference; /* Must be the first member. */
   mtx_t mutex;                     /* Protects the mutable state below. */
   /* Immutable */
   boolean supports_unsync;  /* Buffer supports unsynchronized maps. */
   unsigned bt_depth;        /* Backtrace depth captured for map events. */
   /* Protected by mutex */
   boolean mapped;           /* Currently mapped. */
   boolean mapped_sync;      /* Currently mapped synchronously. */
   struct debug_stack_frame *map_frame; /* Backtrace of the last map call. */
};
65
/**
 * One command-stream reference to a buffer, stored per context in
 * debug_flush_ctx::ref_hash, keyed by the fbuf pointer. Holds a reference
 * on the fbuf plus the backtrace captured when the buffer was first
 * referenced on the current command batch.
 */
struct debug_flush_item {
   struct debug_flush_buf *fbuf; /* Referenced buffer (counted reference). */
   unsigned bt_depth;            /* Depth of the ref_frame backtrace. */
   struct debug_stack_frame *ref_frame; /* Backtrace of the first reference. */
};
71
/**
 * Per-context debug-flush state. All live contexts are linked on the
 * global ctx_list (through @head) so that debug_flush_map() can check a
 * newly mapped buffer against every context's outstanding references.
 */
struct debug_flush_ctx {
   /* Contexts are used by a single thread at a time */
   unsigned bt_depth;               /* Backtrace depth for reference captures. */
   boolean catch_map_of_referenced; /* Also warn when mapping a buffer already
                                     * referenced on the command stream. */
   struct util_hash_table *ref_hash; /* fbuf pointer -> debug_flush_item. */
   struct list_head head;           /* Entry in the global ctx_list. */
};
79
/* Protects ctx_list; individual contexts are single-threaded by contract. */
static mtx_t list_mutex = _MTX_INITIALIZER_NP;
/* Global list of live debug_flush contexts, linked via debug_flush_ctx::head,
 * statically initialized to an empty (self-referencing) list. */
static struct list_head ctx_list = {&ctx_list, &ctx_list};
82
83 static struct debug_stack_frame *
84 debug_flush_capture_frame(int start, int depth)
85 {
86 struct debug_stack_frame *frames;
87
88 frames = CALLOC(depth, sizeof(*frames));
89 if (!frames)
90 return NULL;
91
92 debug_backtrace_capture(frames, start, depth);
93 return frames;
94 }
95
/**
 * Hash-table key comparison: pointer identity.
 * Returns 0 when the keys match, 1 otherwise.
 */
static int
debug_flush_pointer_compare(void *key1, void *key2)
{
   if (key1 == key2)
      return 0;

   return 1;
}
101
/**
 * Hash-table hash function: the pointer value itself, truncated to
 * unsigned.
 *
 * Convert through uintptr_t rather than unsigned long: pointer-to-integer
 * conversion is only well-defined through uintptr_t, and on LLP64 targets
 * (64-bit Windows) unsigned long is 32 bits, so the old cast discarded
 * the implementation-defined upper pointer bits before we chose to.
 * Truncating the uintptr_t to unsigned is fine for a hash.
 */
static unsigned
debug_flush_pointer_hash(void *key)
{
   return (unsigned) (uintptr_t) key;
}
107
108 struct debug_flush_buf *
109 debug_flush_buf_create(boolean supports_unsync, unsigned bt_depth)
110 {
111 struct debug_flush_buf *fbuf = CALLOC_STRUCT(debug_flush_buf);
112
113 if (!fbuf)
114 goto out_no_buf;
115
116 fbuf->supports_unsync = supports_unsync;
117 fbuf->bt_depth = bt_depth;
118 pipe_reference_init(&fbuf->reference, 1);
119 (void) mtx_init(&fbuf->mutex, mtx_plain);
120
121 return fbuf;
122 out_no_buf:
123 debug_printf("Debug flush buffer creation failed.\n");
124 debug_printf("Debug flush checking for this buffer will be incomplete.\n");
125 return NULL;
126 }
127
/**
 * Point *dst at src, dropping the old reference held through *dst and
 * destroying the old object (including its saved map backtrace) if its
 * refcount reaches zero. Passing src == NULL releases the reference.
 *
 * NOTE(review): with *dst == NULL, &(*dst)->reference relies on
 * pipe_reference being the first struct member (offset 0) and on
 * pipe_reference() tolerating NULL pointers — see the "Must be the first
 * member" comment on struct debug_flush_buf; confirm against u_inlines.h.
 */
void
debug_flush_buf_reference(struct debug_flush_buf **dst,
                          struct debug_flush_buf *src)
{
   struct debug_flush_buf *fbuf = *dst;

   /* pipe_reference returns TRUE when the old object must be destroyed. */
   if (pipe_reference(&(*dst)->reference, &src->reference)) {
      FREE(fbuf->map_frame);

      FREE(fbuf);
   }

   *dst = src;
}
142
143 static void
144 debug_flush_item_destroy(struct debug_flush_item *item)
145 {
146 debug_flush_buf_reference(&item->fbuf, NULL);
147
148 FREE(item->ref_frame);
149
150 FREE(item);
151 }
152
153 struct debug_flush_ctx *
154 debug_flush_ctx_create(boolean catch_reference_of_mapped, unsigned bt_depth)
155 {
156 struct debug_flush_ctx *fctx = CALLOC_STRUCT(debug_flush_ctx);
157
158 if (!fctx)
159 goto out_no_ctx;
160
161 fctx->ref_hash = util_hash_table_create(debug_flush_pointer_hash,
162 debug_flush_pointer_compare);
163
164 if (!fctx->ref_hash)
165 goto out_no_ref_hash;
166
167 fctx->bt_depth = bt_depth;
168 mtx_lock(&list_mutex);
169 list_addtail(&fctx->head, &ctx_list);
170 mtx_unlock(&list_mutex);
171
172 return fctx;
173
174 out_no_ref_hash:
175 FREE(fctx);
176 out_no_ctx:
177 debug_printf("Debug flush context creation failed.\n");
178 debug_printf("Debug flush checking for this context will be incomplete.\n");
179 return NULL;
180 }
181
182 static void
183 debug_flush_alert(const char *s, const char *op,
184 unsigned start, unsigned depth,
185 boolean continued,
186 boolean capture,
187 const struct debug_stack_frame *frame)
188 {
189 if (capture)
190 frame = debug_flush_capture_frame(start, depth);
191
192 if (s)
193 debug_printf("%s ", s);
194 if (frame) {
195 debug_printf("%s backtrace follows:\n", op);
196 debug_backtrace_dump(frame, depth);
197 } else
198 debug_printf("No %s backtrace was captured.\n", op);
199
200 if (continued)
201 debug_printf("**********************************\n");
202 else
203 debug_printf("*********END OF MESSAGE***********\n\n\n");
204
205 if (capture)
206 FREE((void *)frame);
207 }
208
209
210 void
211 debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
212 {
213 boolean mapped_sync = FALSE;
214
215 if (!fbuf)
216 return;
217
218 mtx_lock(&fbuf->mutex);
219 if (fbuf->mapped) {
220 debug_flush_alert("Recursive map detected.", "Map",
221 2, fbuf->bt_depth, TRUE, TRUE, NULL);
222 debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, FALSE,
223 FALSE, fbuf->map_frame);
224 } else if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) ||
225 !fbuf->supports_unsync) {
226 fbuf->mapped_sync = mapped_sync = TRUE;
227 }
228 fbuf->map_frame = debug_flush_capture_frame(1, fbuf->bt_depth);
229 fbuf->mapped = TRUE;
230 mtx_unlock(&fbuf->mutex);
231
232 if (mapped_sync) {
233 struct debug_flush_ctx *fctx;
234
235 mtx_lock(&list_mutex);
236 LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
237 struct debug_flush_item *item =
238 util_hash_table_get(fctx->ref_hash, fbuf);
239
240 if (item && fctx->catch_map_of_referenced) {
241 debug_flush_alert("Already referenced map detected.",
242 "Map", 2, fbuf->bt_depth, TRUE, TRUE, NULL);
243 debug_flush_alert(NULL, "Reference", 0, item->bt_depth,
244 FALSE, FALSE, item->ref_frame);
245 }
246 }
247 mtx_unlock(&list_mutex);
248 }
249 }
250
251 void
252 debug_flush_unmap(struct debug_flush_buf *fbuf)
253 {
254 if (!fbuf)
255 return;
256
257 mtx_lock(&fbuf->mutex);
258 if (!fbuf->mapped)
259 debug_flush_alert("Unmap not previously mapped detected.", "Map",
260 2, fbuf->bt_depth, FALSE, TRUE, NULL);
261
262 fbuf->mapped_sync = FALSE;
263 fbuf->mapped = FALSE;
264 FREE(fbuf->map_frame);
265 fbuf->map_frame = NULL;
266 mtx_unlock(&fbuf->mutex);
267 }
268
269
270 /**
271 * Add the given buffer to the list of active buffers. Active buffers
272 * are those which are referenced by the command buffer currently being
273 * constructed.
274 */
275 void
276 debug_flush_cb_reference(struct debug_flush_ctx *fctx,
277 struct debug_flush_buf *fbuf)
278 {
279 struct debug_flush_item *item;
280
281 if (!fctx || !fbuf)
282 return;
283
284 item = util_hash_table_get(fctx->ref_hash, fbuf);
285
286 mtx_lock(&fbuf->mutex);
287 if (fbuf->mapped_sync) {
288 debug_flush_alert("Reference of mapped buffer detected.", "Reference",
289 2, fctx->bt_depth, TRUE, TRUE, NULL);
290 debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
291 FALSE, fbuf->map_frame);
292 }
293 mtx_unlock(&fbuf->mutex);
294
295 if (!item) {
296 item = CALLOC_STRUCT(debug_flush_item);
297 if (item) {
298 debug_flush_buf_reference(&item->fbuf, fbuf);
299 item->bt_depth = fctx->bt_depth;
300 item->ref_frame = debug_flush_capture_frame(2, item->bt_depth);
301 if (util_hash_table_set(fctx->ref_hash, fbuf, item) != PIPE_OK) {
302 debug_flush_item_destroy(item);
303 goto out_no_item;
304 }
305 return;
306 }
307 goto out_no_item;
308 }
309 return;
310
311 out_no_item:
312 debug_printf("Debug flush command buffer reference creation failed.\n");
313 debug_printf("Debug flush checking will be incomplete "
314 "for this command batch.\n");
315 }
316
317 static enum pipe_error
318 debug_flush_might_flush_cb(void *key, void *value, void *data)
319 {
320 struct debug_flush_item *item =
321 (struct debug_flush_item *) value;
322 struct debug_flush_buf *fbuf = item->fbuf;
323 const char *reason = (const char *) data;
324 char message[80];
325
326 util_snprintf(message, sizeof(message),
327 "%s referenced mapped buffer detected.", reason);
328
329 mtx_lock(&fbuf->mutex);
330 if (fbuf->mapped_sync) {
331 debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
332 debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
333 fbuf->map_frame);
334 debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
335 FALSE, item->ref_frame);
336 }
337 mtx_unlock(&fbuf->mutex);
338
339 return PIPE_OK;
340 }
341
342 /**
343 * Called when we're about to possibly flush a command buffer.
344 * We check if any active buffers are in a mapped state. If so, print an alert.
345 */
346 void
347 debug_flush_might_flush(struct debug_flush_ctx *fctx)
348 {
349 if (!fctx)
350 return;
351
352 util_hash_table_foreach(fctx->ref_hash,
353 debug_flush_might_flush_cb,
354 "Might flush");
355 }
356
357 static enum pipe_error
358 debug_flush_flush_cb(void *key, void *value, void *data)
359 {
360 struct debug_flush_item *item =
361 (struct debug_flush_item *) value;
362
363 debug_flush_item_destroy(item);
364
365 return PIPE_OK;
366 }
367
368
369 /**
370 * Called when we flush a command buffer. Two things are done:
371 * 1. Check if any of the active buffers are currently mapped (alert if so).
372 * 2. Discard/unreference all the active buffers.
373 */
374 void
375 debug_flush_flush(struct debug_flush_ctx *fctx)
376 {
377 if (!fctx)
378 return;
379
380 util_hash_table_foreach(fctx->ref_hash,
381 debug_flush_might_flush_cb,
382 "Flush");
383 util_hash_table_foreach(fctx->ref_hash,
384 debug_flush_flush_cb,
385 NULL);
386 util_hash_table_clear(fctx->ref_hash);
387 }
388
389 void
390 debug_flush_ctx_destroy(struct debug_flush_ctx *fctx)
391 {
392 if (!fctx)
393 return;
394
395 list_del(&fctx->head);
396 util_hash_table_foreach(fctx->ref_hash,
397 debug_flush_flush_cb,
398 NULL);
399 util_hash_table_clear(fctx->ref_hash);
400 util_hash_table_destroy(fctx->ref_hash);
401 FREE(fctx);
402 }
403 #endif