util/u_queue: track job size and limit the size of queue growth
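
Only the glthread.c part of the change is shown here; the queue-side logic
lives in util/u_queue.[ch].  What lands in this file is the fallout: every
util_queue_add_job() call gains a trailing job-size argument (0 here, since
glthread's batches are preallocated and not worth accounting against the
queue's size budget), along with some glthread monitoring plumbing that rides
along in the same tree (a stats struct handed to the driver, and a caller
name for _mesa_glthread_restore_dispatch() to make the debug printf useful).
Judging from the updated call sites below, the new prototype presumably looks
like this (the job_size parameter name is an assumption, not taken from this
diff):

    /* Assumed shape of the updated u_queue entry point; only the
     * trailing ", 0" at the call sites below is visible in this file. */
    void util_queue_add_job(struct util_queue *queue,
                            void *job,
                            struct util_queue_fence *fence,
                            util_queue_execute_func execute,
                            util_queue_execute_func cleanup,
                            const int job_size);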
diff --git a/src/mesa/main/glthread.c b/src/mesa/main/glthread.c
index d467298b639c4be564ff21b63523cd7a46868151..82baad597f92168f9c1ec09d8092ff82644fd5e8 100644
--- a/src/mesa/main/glthread.c
+++ b/src/mesa/main/glthread.c
@@ -36,6 +36,7 @@
 #include "main/glthread.h"
 #include "main/marshal.h"
 #include "main/marshal_generated.h"
+#include "util/u_atomic.h"
 #include "util/u_thread.h"
 
 
@@ -60,7 +61,7 @@ glthread_thread_initialization(void *job, int thread_index)
 {
    struct gl_context *ctx = (struct gl_context*)job;
 
-   ctx->Driver.SetBackgroundContext(ctx);
+   ctx->Driver.SetBackgroundContext(ctx, &ctx->GLThread->stats);
    _glapi_set_context(ctx);
 }
 
@@ -72,7 +73,7 @@ _mesa_glthread_init(struct gl_context *ctx)
    if (!glthread)
       return;
 
-   if (!util_queue_init(&glthread->queue, "glthread", MARSHAL_MAX_BATCHES - 2,
+   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
                         1, 0)) {
       free(glthread);
       return;
@@ -90,6 +91,7 @@ _mesa_glthread_init(struct gl_context *ctx)
       util_queue_fence_init(&glthread->batches[i].fence);
    }
 
+   glthread->stats.queue = &glthread->queue;
    ctx->CurrentClientDispatch = ctx->MarshalExec;
    ctx->GLThread = glthread;
 
@@ -97,7 +99,7 @@ _mesa_glthread_init(struct gl_context *ctx)
    struct util_queue_fence fence;
    util_queue_fence_init(&fence);
    util_queue_add_job(&glthread->queue, ctx, &fence,
-                      glthread_thread_initialization, NULL);
+                      glthread_thread_initialization, NULL, 0);
    util_queue_fence_wait(&fence);
    util_queue_fence_destroy(&fence);
 }
@@ -119,11 +121,11 @@ _mesa_glthread_destroy(struct gl_context *ctx)
    free(glthread);
    ctx->GLThread = NULL;
 
-   _mesa_glthread_restore_dispatch(ctx);
+   _mesa_glthread_restore_dispatch(ctx, "destroy");
 }
 
 void
-_mesa_glthread_restore_dispatch(struct gl_context *ctx)
+_mesa_glthread_restore_dispatch(struct gl_context *ctx, const char *func)
 {
    /* Remove ourselves from the dispatch table except if another ctx/thread
     * already installed a new dispatch table.
@@ -134,6 +136,9 @@ _mesa_glthread_restore_dispatch(struct gl_context *ctx)
    if (_glapi_get_dispatch() == ctx->MarshalExec) {
        ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
        _glapi_set_dispatch(ctx->CurrentClientDispatch);
+#if 0
+       printf("glthread disabled: %s\n", func);
+#endif
    }
 }
 
@@ -159,8 +164,10 @@ _mesa_glthread_flush_batch(struct gl_context *ctx)
       return;
    }
 
+   p_atomic_add(&glthread->stats.num_offloaded_items, next->used);
+
    util_queue_add_job(&glthread->queue, next, &next->fence,
-                      glthread_unmarshal_batch, NULL);
+                      glthread_unmarshal_batch, NULL, 0);
    glthread->last = glthread->next;
    glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
 }
@@ -188,16 +195,29 @@ _mesa_glthread_finish(struct gl_context *ctx)
 
    struct glthread_batch *last = &glthread->batches[glthread->last];
    struct glthread_batch *next = &glthread->batches[glthread->next];
+   bool synced = false;
 
-   if (!util_queue_fence_is_signalled(&last->fence))
+   if (!util_queue_fence_is_signalled(&last->fence)) {
       util_queue_fence_wait(&last->fence);
+      synced = true;
+   }
 
    if (next->used) {
+      p_atomic_add(&glthread->stats.num_direct_items, next->used);
+
       /* Since glthread_unmarshal_batch changes the dispatch to direct,
        * restore it after it's done.
        */
       struct _glapi_table *dispatch = _glapi_get_dispatch();
       glthread_unmarshal_batch(next, 0);
       _glapi_set_dispatch(dispatch);
+
+      /* It's not a sync because we don't enqueue partial batches, but
+       * it would be a sync if we did. So count it anyway.
+       */
+      synced = true;
    }
+
+   if (synced)
+      p_atomic_inc(&glthread->stats.num_syncs);
 }
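
The counters added above feed the stats struct registered in
_mesa_glthread_init(): num_offloaded_items counts calls executed
asynchronously on the glthread, num_direct_items counts calls the
application thread had to unmarshal itself in _mesa_glthread_finish(), and
num_syncs counts how often _mesa_glthread_finish() synchronized with the
glthread (a direct unmarshal of a partial batch is counted too, per the
comment above).

For context, here is a minimal sketch of the technique named in the subject
line: grow a full job ring only while the total declared size of queued jobs
stays under a hard cap, so a producer that outruns the consumer cannot pin
unbounded memory.  This is an illustration rather than the Mesa code; the
names, the cap value, and the absence of locking are all simplifications:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct job {
       void (*execute)(void *data);
       void *data;
       int size;                 /* producer-declared payload size in bytes */
    };

    struct sized_queue {
       struct job *jobs;         /* ring buffer of pending jobs */
       int max_jobs;             /* current ring capacity */
       int num_queued;
       int read_idx, write_idx;
       int64_t total_jobs_size;  /* sum of sizes of all queued jobs */
    };

    #define QUEUE_SIZE_CAP (256 * 1024 * 1024)   /* assumed cap, in bytes */

    /* Double the ring when it fills up, but refuse once the queued payload
     * exceeds the cap; the caller then has to wait for the consumer. */
    static bool
    queue_try_grow(struct sized_queue *q)
    {
       if (q->total_jobs_size >= QUEUE_SIZE_CAP)
          return false;

       int new_max = q->max_jobs ? q->max_jobs * 2 : 8;
       struct job *jobs = calloc(new_max, sizeof(*jobs));
       if (!jobs)
          return false;

       /* Unwrap the old ring into the new, larger array. */
       for (int i = 0; i < q->num_queued; i++)
          jobs[i] = q->jobs[(q->read_idx + i) % q->max_jobs];

       free(q->jobs);
       q->jobs = jobs;
       q->max_jobs = new_max;
       q->read_idx = 0;
       q->write_idx = q->num_queued;
       return true;
    }

    bool
    queue_add_job(struct sized_queue *q, void (*execute)(void *),
                  void *data, int size)
    {
       if (q->num_queued == q->max_jobs && !queue_try_grow(q))
          return false;   /* full and over the cap; real code would block */

       q->jobs[q->write_idx] = (struct job){ execute, data, size };
       q->write_idx = (q->write_idx + 1) % q->max_jobs;
       q->num_queued++;
       q->total_jobs_size += size;
       return true;
    }

Under this reading, glthread's ", 0" size arguments opt it out of the
accounting entirely: its batches live in a fixed, preallocated array, so
there is nothing for the queue to limit.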