#include "util/u_memory.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
-#include "util/u_double_list.h"
+#include "util/list.h"
#include "util/u_inlines.h"
#include "util/u_string.h"
#include "os/os_thread.h"
#include <stdio.h>
+/* Future improvement: grow the maps[] stack with realloc instead of a fixed-size array. */
+#define DEBUG_FLUSH_MAP_DEPTH 16
+
+struct debug_map_item {
+ struct debug_stack_frame *frame;
+ boolean persistent;
+};
+
struct debug_flush_buf {
/* Atomic */
struct pipe_reference reference; /* Must be the first member. */
- pipe_mutex mutex;
+ mtx_t mutex;
/* Immutable */
- boolean supports_unsync;
+ boolean supports_persistent;
unsigned bt_depth;
/* Protected by mutex */
- boolean mapped;
- boolean mapped_sync;
- struct debug_stack_frame *map_frame;
+ int map_count;
+ boolean has_sync_map;
+ int last_sync_map;
+ struct debug_map_item maps[DEBUG_FLUSH_MAP_DEPTH];
};
struct debug_flush_item {
struct list_head head;
};
-pipe_static_mutex(list_mutex);
+static mtx_t list_mutex = _MTX_INITIALIZER_NP;
static struct list_head ctx_list = {&ctx_list, &ctx_list};
static struct debug_stack_frame *
}
struct debug_flush_buf *
-debug_flush_buf_create(boolean supports_unsync, unsigned bt_depth)
+debug_flush_buf_create(boolean supports_persistent, unsigned bt_depth)
{
struct debug_flush_buf *fbuf = CALLOC_STRUCT(debug_flush_buf);
if (!fbuf)
goto out_no_buf;
- fbuf->supports_unsync = supports_unsync;
+ fbuf->supports_persistent = supports_persistent;
fbuf->bt_depth = bt_depth;
pipe_reference_init(&fbuf->reference, 1);
- pipe_mutex_init(fbuf->mutex);
+ (void) mtx_init(&fbuf->mutex, mtx_plain);
return fbuf;
out_no_buf:
struct debug_flush_buf *fbuf = *dst;
if (pipe_reference(&(*dst)->reference, &src->reference)) {
- if (fbuf->map_frame)
- FREE(fbuf->map_frame);
+ int i;
+ for (i = 0; i < fbuf->map_count; ++i) {
+ FREE(fbuf->maps[i].frame);
+ }
FREE(fbuf);
}
{
debug_flush_buf_reference(&item->fbuf, NULL);
- if (item->ref_frame)
- FREE(item->ref_frame);
+ FREE(item->ref_frame);
FREE(item);
}
struct debug_flush_ctx *
-debug_flush_ctx_create(boolean catch_reference_of_mapped, unsigned bt_depth)
+debug_flush_ctx_create(UNUSED boolean catch_reference_of_mapped,
+ unsigned bt_depth)
{
struct debug_flush_ctx *fctx = CALLOC_STRUCT(debug_flush_ctx);
goto out_no_ref_hash;
fctx->bt_depth = bt_depth;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
list_addtail(&fctx->head, &ctx_list);
- pipe_mutex_unlock(list_mutex);
+ mtx_unlock(&list_mutex);
return fctx;
void
debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
{
- boolean mapped_sync = FALSE;
+ boolean map_sync, persistent;
if (!fbuf)
return;
- pipe_mutex_lock(fbuf->mutex);
- if (fbuf->mapped) {
- debug_flush_alert("Recursive map detected.", "Map",
+ mtx_lock(&fbuf->mutex);
+ map_sync = !(flags & PIPE_TRANSFER_UNSYNCHRONIZED);
+ persistent = !map_sync || fbuf->supports_persistent ||
+ !!(flags & PIPE_TRANSFER_PERSISTENT);
+
+   /* Recursive maps are allowed if all previous maps are persistent,
+    * or if the current map is unsynchronized. In any other case a flush
+    * could occur while a non-persistent map is still active.
+    */
+ if (fbuf->has_sync_map && !map_sync) {
+ debug_flush_alert("Recursive sync map detected.", "Map",
2, fbuf->bt_depth, TRUE, TRUE, NULL);
debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, FALSE,
- FALSE, fbuf->map_frame);
- } else if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) ||
- !fbuf->supports_unsync) {
- fbuf->mapped_sync = mapped_sync = TRUE;
+ FALSE, fbuf->maps[fbuf->last_sync_map].frame);
+ }
+
+ fbuf->maps[fbuf->map_count].frame =
+ debug_flush_capture_frame(1, fbuf->bt_depth);
+ fbuf->maps[fbuf->map_count].persistent = persistent;
+ if (!persistent) {
+ fbuf->has_sync_map = TRUE;
+ fbuf->last_sync_map = fbuf->map_count;
}
- fbuf->map_frame = debug_flush_capture_frame(1, fbuf->bt_depth);
- fbuf->mapped = TRUE;
- pipe_mutex_unlock(fbuf->mutex);
- if (mapped_sync) {
+ fbuf->map_count++;
+ assert(fbuf->map_count < DEBUG_FLUSH_MAP_DEPTH);
+
+ mtx_unlock(&fbuf->mutex);
+
+ if (!persistent) {
struct debug_flush_ctx *fctx;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
struct debug_flush_item *item =
util_hash_table_get(fctx->ref_hash, fbuf);
FALSE, FALSE, item->ref_frame);
}
}
- pipe_mutex_unlock(list_mutex);
+ mtx_unlock(&list_mutex);
}
}
if (!fbuf)
return;
- pipe_mutex_lock(fbuf->mutex);
- if (!fbuf->mapped)
+ mtx_lock(&fbuf->mutex);
+ if (--fbuf->map_count < 0) {
debug_flush_alert("Unmap not previously mapped detected.", "Map",
2, fbuf->bt_depth, FALSE, TRUE, NULL);
-
- fbuf->mapped_sync = FALSE;
- fbuf->mapped = FALSE;
- if (fbuf->map_frame) {
- FREE(fbuf->map_frame);
- fbuf->map_frame = NULL;
+ } else {
+ if (fbuf->has_sync_map && fbuf->last_sync_map == fbuf->map_count) {
+ int i = fbuf->map_count;
+
+ fbuf->has_sync_map = FALSE;
+ while (i-- && !fbuf->has_sync_map) {
+ if (!fbuf->maps[i].persistent) {
+ fbuf->has_sync_map = TRUE;
+ fbuf->last_sync_map = i;
+ }
+ }
+ FREE(fbuf->maps[fbuf->map_count].frame);
+ fbuf->maps[fbuf->map_count].frame = NULL;
+ }
}
- pipe_mutex_unlock(fbuf->mutex);
+ mtx_unlock(&fbuf->mutex);
}
+
+/**
+ * Add the given buffer to the list of active buffers. Active buffers
+ * are those which are referenced by the command buffer currently being
+ * constructed.
+ */
void
debug_flush_cb_reference(struct debug_flush_ctx *fctx,
struct debug_flush_buf *fbuf)
item = util_hash_table_get(fctx->ref_hash, fbuf);
- pipe_mutex_lock(fbuf->mutex);
- if (fbuf->mapped_sync) {
+ mtx_lock(&fbuf->mutex);
+ if (fbuf->map_count && fbuf->has_sync_map) {
debug_flush_alert("Reference of mapped buffer detected.", "Reference",
2, fctx->bt_depth, TRUE, TRUE, NULL);
debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
- FALSE, fbuf->map_frame);
+ FALSE, fbuf->maps[fbuf->last_sync_map].frame);
}
- pipe_mutex_unlock(fbuf->mutex);
+ mtx_unlock(&fbuf->mutex);
if (!item) {
item = CALLOC_STRUCT(debug_flush_item);
}
static enum pipe_error
-debug_flush_might_flush_cb(void *key, void *value, void *data)
+debug_flush_might_flush_cb(UNUSED void *key, void *value, void *data)
{
struct debug_flush_item *item =
(struct debug_flush_item *) value;
struct debug_flush_buf *fbuf = item->fbuf;
- const char *reason = (const char *) data;
- char message[80];
- util_snprintf(message, sizeof(message),
- "%s referenced mapped buffer detected.", reason);
+ mtx_lock(&fbuf->mutex);
+ if (fbuf->map_count && fbuf->has_sync_map) {
+ const char *reason = (const char *) data;
+ char message[80];
+
+ snprintf(message, sizeof(message),
+ "%s referenced mapped buffer detected.", reason);
- pipe_mutex_lock(fbuf->mutex);
- if (fbuf->mapped_sync) {
debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
- fbuf->map_frame);
+ fbuf->maps[fbuf->last_sync_map].frame);
debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
FALSE, item->ref_frame);
}
- pipe_mutex_unlock(fbuf->mutex);
+ mtx_unlock(&fbuf->mutex);
return PIPE_OK;
}
+/**
+ * Called when a command buffer may be about to be flushed.
+ * Checks whether any active buffer is still mapped; if so, prints an alert.
+ */
void
debug_flush_might_flush(struct debug_flush_ctx *fctx)
{
}
static enum pipe_error
-debug_flush_flush_cb(void *key, void *value, void *data)
+debug_flush_flush_cb(UNUSED void *key, void *value, UNUSED void *data)
{
struct debug_flush_item *item =
(struct debug_flush_item *) value;
}
+/**
+ * Called when we flush a command buffer. Two things are done:
+ * 1. Check if any of the active buffers are currently mapped (alert if so).
+ * 2. Discard/unreference all the active buffers.
+ */
void
debug_flush_flush(struct debug_flush_ctx *fctx)
{