1 /**************************************************************************
3 * Copyright 2012 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
/**
 * @file
 * u_debug_flush.c Debug flush and map-related issues:
 * - Flush while synchronously mapped.
 * - Command stream reference while synchronously mapped.
 * - Synchronous map while referenced on command stream.
 * - Recursive sync maps.
 * - Unmap while not mapped.
 *
 * @author Thomas Hellstrom <thellstrom@vmware.com>
 */
41 #include "pipe/p_compiler.h"
42 #include "util/u_debug_stack.h"
43 #include "util/u_debug.h"
44 #include "util/u_memory.h"
45 #include "util/u_debug_flush.h"
46 #include "util/u_hash_table.h"
47 #include "util/list.h"
48 #include "util/u_inlines.h"
49 #include "util/u_string.h"
50 #include "os/os_thread.h"
53 /* Future improvement: Use realloc instead? */
54 #define DEBUG_FLUSH_MAP_DEPTH 16
56 struct debug_map_item
{
57 struct debug_stack_frame
*frame
;
61 struct debug_flush_buf
{
63 struct pipe_reference reference
; /* Must be the first member. */
66 boolean supports_persistent
;
68 /* Protected by mutex */
72 struct debug_map_item maps
[DEBUG_FLUSH_MAP_DEPTH
];
/**
 * One entry in a context's hash of command-stream-referenced buffers.
 * NOTE(review): "bt_depth" and the closing brace were lost in extraction;
 * reconstructed from uses (item->bt_depth) — verify upstream.
 */
struct debug_flush_item {
   struct debug_flush_buf *fbuf;        /* Referenced buffer (holds a reference). */
   unsigned bt_depth;                   /* Backtrace depth used for ref_frame. */
   struct debug_stack_frame *ref_frame; /* Backtrace captured at reference time. */
};
81 struct debug_flush_ctx
{
82 /* Contexts are used by a single thread at a time */
84 boolean catch_map_of_referenced
;
85 struct util_hash_table
*ref_hash
;
86 struct list_head head
;
/* Serializes access to the global list of debug-flush contexts. */
static mtx_t list_mutex = _MTX_INITIALIZER_NP;

/* Statically-initialized empty intrusive list linking every live
 * debug_flush_ctx via its "head" member.
 */
static struct list_head ctx_list = {&ctx_list, &ctx_list};
92 static struct debug_stack_frame
*
93 debug_flush_capture_frame(int start
, int depth
)
95 struct debug_stack_frame
*frames
;
97 frames
= CALLOC(depth
, sizeof(*frames
));
101 debug_backtrace_capture(frames
, start
, depth
);
/**
 * Hash-table key comparison: pointer identity.
 * Returns 0 when equal, 1 otherwise (util_hash_table convention).
 */
static int
debug_flush_pointer_compare(void *key1, void *key2)
{
   return (key1 == key2) ? 0 : 1;
}
/**
 * Hash-table hash function: truncate the pointer value to unsigned.
 * (Identity-style hash; adequate for a debug-only table.)
 */
static unsigned
debug_flush_pointer_hash(void *key)
{
   return (unsigned) (unsigned long) key;
}
117 struct debug_flush_buf
*
118 debug_flush_buf_create(boolean supports_persistent
, unsigned bt_depth
)
120 struct debug_flush_buf
*fbuf
= CALLOC_STRUCT(debug_flush_buf
);
125 fbuf
->supports_persistent
= supports_persistent
;
126 fbuf
->bt_depth
= bt_depth
;
127 pipe_reference_init(&fbuf
->reference
, 1);
128 (void) mtx_init(&fbuf
->mutex
, mtx_plain
);
132 debug_printf("Debug flush buffer creation failed.\n");
133 debug_printf("Debug flush checking for this buffer will be incomplete.\n");
138 debug_flush_buf_reference(struct debug_flush_buf
**dst
,
139 struct debug_flush_buf
*src
)
141 struct debug_flush_buf
*fbuf
= *dst
;
143 if (pipe_reference(&(*dst
)->reference
, &src
->reference
)) {
146 for (i
= 0; i
< fbuf
->map_count
; ++i
) {
147 FREE(fbuf
->maps
[i
].frame
);
156 debug_flush_item_destroy(struct debug_flush_item
*item
)
158 debug_flush_buf_reference(&item
->fbuf
, NULL
);
160 FREE(item
->ref_frame
);
165 struct debug_flush_ctx
*
166 debug_flush_ctx_create(UNUSED boolean catch_reference_of_mapped
,
169 struct debug_flush_ctx
*fctx
= CALLOC_STRUCT(debug_flush_ctx
);
174 fctx
->ref_hash
= util_hash_table_create(debug_flush_pointer_hash
,
175 debug_flush_pointer_compare
);
178 goto out_no_ref_hash
;
180 fctx
->bt_depth
= bt_depth
;
181 mtx_lock(&list_mutex
);
182 list_addtail(&fctx
->head
, &ctx_list
);
183 mtx_unlock(&list_mutex
);
190 debug_printf("Debug flush context creation failed.\n");
191 debug_printf("Debug flush checking for this context will be incomplete.\n");
196 debug_flush_alert(const char *s
, const char *op
,
197 unsigned start
, unsigned depth
,
200 const struct debug_stack_frame
*frame
)
203 frame
= debug_flush_capture_frame(start
, depth
);
206 debug_printf("%s ", s
);
208 debug_printf("%s backtrace follows:\n", op
);
209 debug_backtrace_dump(frame
, depth
);
211 debug_printf("No %s backtrace was captured.\n", op
);
214 debug_printf("**********************************\n");
216 debug_printf("*********END OF MESSAGE***********\n\n\n");
224 debug_flush_map(struct debug_flush_buf
*fbuf
, unsigned flags
)
226 boolean map_sync
, persistent
;
231 mtx_lock(&fbuf
->mutex
);
232 map_sync
= !(flags
& PIPE_TRANSFER_UNSYNCHRONIZED
);
233 persistent
= !map_sync
|| fbuf
->supports_persistent
||
234 !!(flags
& PIPE_TRANSFER_PERSISTENT
);
236 /* Recursive maps are allowed if previous maps are persistent,
237 * or if the current map is unsync. In other cases we might flush
238 * with unpersistent maps.
240 if (fbuf
->has_sync_map
&& !map_sync
) {
241 debug_flush_alert("Recursive sync map detected.", "Map",
242 2, fbuf
->bt_depth
, TRUE
, TRUE
, NULL
);
243 debug_flush_alert(NULL
, "Previous map", 0, fbuf
->bt_depth
, FALSE
,
244 FALSE
, fbuf
->maps
[fbuf
->last_sync_map
].frame
);
247 fbuf
->maps
[fbuf
->map_count
].frame
=
248 debug_flush_capture_frame(1, fbuf
->bt_depth
);
249 fbuf
->maps
[fbuf
->map_count
].persistent
= persistent
;
251 fbuf
->has_sync_map
= TRUE
;
252 fbuf
->last_sync_map
= fbuf
->map_count
;
256 assert(fbuf
->map_count
< DEBUG_FLUSH_MAP_DEPTH
);
258 mtx_unlock(&fbuf
->mutex
);
261 struct debug_flush_ctx
*fctx
;
263 mtx_lock(&list_mutex
);
264 LIST_FOR_EACH_ENTRY(fctx
, &ctx_list
, head
) {
265 struct debug_flush_item
*item
=
266 util_hash_table_get(fctx
->ref_hash
, fbuf
);
268 if (item
&& fctx
->catch_map_of_referenced
) {
269 debug_flush_alert("Already referenced map detected.",
270 "Map", 2, fbuf
->bt_depth
, TRUE
, TRUE
, NULL
);
271 debug_flush_alert(NULL
, "Reference", 0, item
->bt_depth
,
272 FALSE
, FALSE
, item
->ref_frame
);
275 mtx_unlock(&list_mutex
);
280 debug_flush_unmap(struct debug_flush_buf
*fbuf
)
285 mtx_lock(&fbuf
->mutex
);
286 if (--fbuf
->map_count
< 0) {
287 debug_flush_alert("Unmap not previously mapped detected.", "Map",
288 2, fbuf
->bt_depth
, FALSE
, TRUE
, NULL
);
290 if (fbuf
->has_sync_map
&& fbuf
->last_sync_map
== fbuf
->map_count
) {
291 int i
= fbuf
->map_count
;
293 fbuf
->has_sync_map
= FALSE
;
294 while (i
-- && !fbuf
->has_sync_map
) {
295 if (!fbuf
->maps
[i
].persistent
) {
296 fbuf
->has_sync_map
= TRUE
;
297 fbuf
->last_sync_map
= i
;
300 FREE(fbuf
->maps
[fbuf
->map_count
].frame
);
301 fbuf
->maps
[fbuf
->map_count
].frame
= NULL
;
304 mtx_unlock(&fbuf
->mutex
);
309 * Add the given buffer to the list of active buffers. Active buffers
310 * are those which are referenced by the command buffer currently being
314 debug_flush_cb_reference(struct debug_flush_ctx
*fctx
,
315 struct debug_flush_buf
*fbuf
)
317 struct debug_flush_item
*item
;
322 item
= util_hash_table_get(fctx
->ref_hash
, fbuf
);
324 mtx_lock(&fbuf
->mutex
);
325 if (fbuf
->map_count
&& fbuf
->has_sync_map
) {
326 debug_flush_alert("Reference of mapped buffer detected.", "Reference",
327 2, fctx
->bt_depth
, TRUE
, TRUE
, NULL
);
328 debug_flush_alert(NULL
, "Map", 0, fbuf
->bt_depth
, FALSE
,
329 FALSE
, fbuf
->maps
[fbuf
->last_sync_map
].frame
);
331 mtx_unlock(&fbuf
->mutex
);
334 item
= CALLOC_STRUCT(debug_flush_item
);
336 debug_flush_buf_reference(&item
->fbuf
, fbuf
);
337 item
->bt_depth
= fctx
->bt_depth
;
338 item
->ref_frame
= debug_flush_capture_frame(2, item
->bt_depth
);
339 if (util_hash_table_set(fctx
->ref_hash
, fbuf
, item
) != PIPE_OK
) {
340 debug_flush_item_destroy(item
);
350 debug_printf("Debug flush command buffer reference creation failed.\n");
351 debug_printf("Debug flush checking will be incomplete "
352 "for this command batch.\n");
355 static enum pipe_error
356 debug_flush_might_flush_cb(UNUSED
void *key
, void *value
, void *data
)
358 struct debug_flush_item
*item
=
359 (struct debug_flush_item
*) value
;
360 struct debug_flush_buf
*fbuf
= item
->fbuf
;
362 mtx_lock(&fbuf
->mutex
);
363 if (fbuf
->map_count
&& fbuf
->has_sync_map
) {
364 const char *reason
= (const char *) data
;
367 util_snprintf(message
, sizeof(message
),
368 "%s referenced mapped buffer detected.", reason
);
370 debug_flush_alert(message
, reason
, 3, item
->bt_depth
, TRUE
, TRUE
, NULL
);
371 debug_flush_alert(NULL
, "Map", 0, fbuf
->bt_depth
, TRUE
, FALSE
,
372 fbuf
->maps
[fbuf
->last_sync_map
].frame
);
373 debug_flush_alert(NULL
, "First reference", 0, item
->bt_depth
, FALSE
,
374 FALSE
, item
->ref_frame
);
376 mtx_unlock(&fbuf
->mutex
);
382 * Called when we're about to possibly flush a command buffer.
383 * We check if any active buffers are in a mapped state. If so, print an alert.
386 debug_flush_might_flush(struct debug_flush_ctx
*fctx
)
391 util_hash_table_foreach(fctx
->ref_hash
,
392 debug_flush_might_flush_cb
,
396 static enum pipe_error
397 debug_flush_flush_cb(UNUSED
void *key
, void *value
, UNUSED
void *data
)
399 struct debug_flush_item
*item
=
400 (struct debug_flush_item
*) value
;
402 debug_flush_item_destroy(item
);
409 * Called when we flush a command buffer. Two things are done:
410 * 1. Check if any of the active buffers are currently mapped (alert if so).
411 * 2. Discard/unreference all the active buffers.
414 debug_flush_flush(struct debug_flush_ctx
*fctx
)
419 util_hash_table_foreach(fctx
->ref_hash
,
420 debug_flush_might_flush_cb
,
422 util_hash_table_foreach(fctx
->ref_hash
,
423 debug_flush_flush_cb
,
425 util_hash_table_clear(fctx
->ref_hash
);
429 debug_flush_ctx_destroy(struct debug_flush_ctx
*fctx
)
434 list_del(&fctx
->head
);
435 util_hash_table_foreach(fctx
->ref_hash
,
436 debug_flush_flush_cb
,
438 util_hash_table_clear(fctx
->ref_hash
);
439 util_hash_table_destroy(fctx
->ref_hash
);