#include "main/mtypes.h"
#include "main/glthread.h"
-#include "main/marshal.h"
+#include "main/glthread_marshal.h"
+#include "main/hash.h"
#include "util/u_atomic.h"
#include "util/u_thread.h"
static void
glthread_thread_initialization(void *job, int thread_index)
{
struct gl_context *ctx = (struct gl_context*)job;
- ctx->Driver.SetBackgroundContext(ctx, &ctx->GLThread->stats);
+ ctx->Driver.SetBackgroundContext(ctx, &ctx->GLThread.stats);
_glapi_set_context(ctx);
}
void
_mesa_glthread_init(struct gl_context *ctx)
{
- struct glthread_state *glthread = calloc(1, sizeof(*glthread));
+ struct glthread_state *glthread = &ctx->GLThread;
- if (!glthread)
- return;
+ assert(!glthread->enabled);
if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
1, 0)) {
- free(glthread);
return;
}
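+ /* glthread mirrors vertex array objects on the application thread. This
+ * hash table maps GL VAO names to glthread's per-VAO state. */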
+ glthread->VAOs = _mesa_NewHashTable();
+ if (!glthread->VAOs) {
+ util_queue_destroy(&glthread->queue);
+ return;
+ }
+
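+ /* Name 0 is the default VAO; it is stored directly in glthread_state and
+ * never enters the hash table. */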
+ _mesa_glthread_reset_vao(&glthread->DefaultVAO);
+ glthread->CurrentVAO = &glthread->DefaultVAO;
+
ctx->MarshalExec = _mesa_create_marshal_table(ctx);
if (!ctx->MarshalExec) {
+ _mesa_DeleteHashTable(glthread->VAOs);
util_queue_destroy(&glthread->queue);
- free(glthread);
return;
}
for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++) {
glthread->batches[i].ctx = ctx;
util_queue_fence_init(&glthread->batches[i].fence);
}
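+ /* Cache a pointer to the next batch so that hot paths don't have to
+ * re-index batches[] on every marshaled call. */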
+ glthread->next_batch = &glthread->batches[glthread->next];
+ glthread->enabled = true;
glthread->stats.queue = &glthread->queue;
+
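+ /* Uploading buffer data from the application thread is only safe when the
+ * driver can create and map buffers unsynchronized from any thread and
+ * allows buffers to remain mapped while draws execute. */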
+ glthread->SupportsBufferUploads =
+ ctx->Const.BufferCreateMapUnsynchronizedThreadSafe &&
+ ctx->Const.AllowMappedBuffersDuringExecution;
+
+ /* If the draw start index is non-zero, glthread can upload to offset 0,
+ * which means the attrib offset has to be -(first * stride).
+ * So require signed vertex buffer offsets.
+ */
+ glthread->SupportsNonVBOUploads = glthread->SupportsBufferUploads &&
+ ctx->Const.VertexBufferOffsetIsInt32;
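+ /* Example: glDrawArrays with first = 100 and stride = 16 can upload the
+ * vertices to offset 0, so the attrib offset must become
+ * -(100 * 16) = -1600, which only a signed vertex buffer offset can hold. */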
+
ctx->CurrentClientDispatch = ctx->MarshalExec;
- ctx->GLThread = glthread;
/* Execute the thread initialization function in the thread. */
struct util_queue_fence fence;
util_queue_fence_init(&fence);
util_queue_add_job(&glthread->queue, ctx, &fence,
glthread_thread_initialization, NULL, 0);
util_queue_fence_wait(&fence);
util_queue_fence_destroy(&fence);
}
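+/* _mesa_HashDeleteAll callback: free one per-VAO state object. */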
+static void
+free_vao(GLuint key, void *data, void *userData)
+{
+ free(data);
+}
+
void
_mesa_glthread_destroy(struct gl_context *ctx)
{
- struct glthread_state *glthread = ctx->GLThread;
+ struct glthread_state *glthread = &ctx->GLThread;
- if (!glthread)
+ if (!glthread->enabled)
return;
_mesa_glthread_finish(ctx);
for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++)
util_queue_fence_destroy(&glthread->batches[i].fence);
- free(glthread);
- ctx->GLThread = NULL;
+ _mesa_HashDeleteAll(glthread->VAOs, free_vao, NULL);
+ _mesa_DeleteHashTable(glthread->VAOs);
+
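+ /* Mark glthread disabled so that subsequent glthread calls, including a
+ * repeated destroy, become no-ops. */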
+ ctx->GLThread.enabled = false;
_mesa_glthread_restore_dispatch(ctx, "destroy");
}
void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
- struct glthread_state *glthread = ctx->GLThread;
- if (!glthread)
+ struct glthread_state *glthread = &ctx->GLThread;
+ if (!glthread->enabled)
return;
- struct glthread_batch *next = &glthread->batches[glthread->next];
+ struct glthread_batch *next = glthread->next_batch;
if (!next->used)
return;
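
/* Hand the batch off to the worker thread for asynchronous execution. */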
util_queue_add_job(&glthread->queue, next, &next->fence,
glthread_unmarshal_batch, NULL, 0);
glthread->last = glthread->next;
glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
+ glthread->next_batch = &glthread->batches[glthread->next];
}
/**
* Waits until all pending batches have been unmarshaled.
*
* This can be used by the main thread to synchronize access to the context,
* since the worker thread will be idle after this call.
*/
void
_mesa_glthread_finish(struct gl_context *ctx)
{
- struct glthread_state *glthread = ctx->GLThread;
- if (!glthread)
+ struct glthread_state *glthread = &ctx->GLThread;
+ if (!glthread->enabled)
return;
/* If this is called from the worker thread, then we've hit a path that
* would deadlock by waiting on itself, so just return instead.
*/
if (u_thread_is_self(glthread->queue.threads[0]))
return;
struct glthread_batch *last = &glthread->batches[glthread->last];
- struct glthread_batch *next = &glthread->batches[glthread->next];
+ struct glthread_batch *next = glthread->next_batch;
bool synced = false;
if (!util_queue_fence_is_signalled(&last->fence)) {
void
_mesa_glthread_finish_before(struct gl_context *ctx, const char *func)
{
_mesa_glthread_finish(ctx);
- debug_print_sync_fallback(func);
+
+ /* Uncomment this if you want to know where glthread syncs. */
+ /*printf("fallback to sync: %s\n", func);*/
}