      glthread->batches[i].ctx = ctx;
      util_queue_fence_init(&glthread->batches[i].fence);
   }
+  glthread->next_batch = &glthread->batches[glthread->next];
   glthread->enabled = true;
   glthread->stats.queue = &glthread->queue;
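
The cache is seeded while the ring is initialized, before glthread->enabled
is set, so the invariant next_batch == &batches[next] already holds when the
first command is recorded. A minimal sketch of the pattern in plain C (the
ring and batch names below are illustrative, not Mesa's):

   #define RING_SIZE   8
   #define BATCH_BYTES 4096

   struct batch { int used; };

   struct ring {
      struct batch batches[RING_SIZE];
      struct batch *next_batch;   /* cached: always &batches[next] */
      unsigned next;              /* index of the batch being filled */
   };

   static void ring_init(struct ring *r)
   {
      r->next = 0;
      r->next_batch = &r->batches[0];   /* establish the invariant early */
   }
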
   if (!glthread->enabled)
      return;
-  struct glthread_batch *next = &glthread->batches[glthread->next];
+  struct glthread_batch *next = glthread->next_batch;
   if (!next->used)
      return;
                      glthread_unmarshal_batch, NULL, 0);
   glthread->last = glthread->next;
   glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
+  glthread->next_batch = &glthread->batches[glthread->next];
}
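
After init, the flush path is the only place where the ring index advances,
so it is also the only place that has to re-derive the cached pointer; every
other reader can simply load glthread->next_batch. Continuing the
illustrative sketch from above:

   /* Submission itself is elided; the point is that the index and the
    * cached pointer are updated together, so they stay coherent. */
   static void ring_flush(struct ring *r)
   {
      r->next = (r->next + 1) % RING_SIZE;
      r->next_batch = &r->batches[r->next];
      r->next_batch->used = 0;   /* the new slot starts empty */
   }
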
/**
      return;
   struct glthread_batch *last = &glthread->batches[glthread->last];
-  struct glthread_batch *next = &glthread->batches[glthread->next];
+  struct glthread_batch *next = glthread->next_batch;
   bool synced = false;
   if (!util_queue_fence_is_signalled(&last->fence)) {
   /** The ring of batches in memory. */
   struct glthread_batch batches[MARSHAL_MAX_BATCHES];
+  /** Pointer to the batch being filled (always &batches[next]). */
+  struct glthread_batch *next_batch;
+
   /** Index of the last submitted batch. */
   unsigned last;
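
Since next_batch duplicates information already held in next, the two could
drift apart if a later change updated one without the other. A hypothetical
debug helper (not part of this patch) that would catch such drift in the
illustrative sketch:

   #include <assert.h>

   static void ring_check(const struct ring *r)
   {
      assert(r->next_batch == &r->batches[r->next]);   /* cache coherent */
   }
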
                                int size)
{
   struct glthread_state *glthread = &ctx->GLThread;
-  struct glthread_batch *next = &glthread->batches[glthread->next];
+  struct glthread_batch *next = glthread->next_batch;
   struct marshal_cmd_base *cmd_base;
   if (unlikely(next->used + size > MARSHAL_MAX_CMD_SIZE)) {
      _mesa_glthread_flush_batch(ctx);
-     next = &glthread->batches[glthread->next];
+     next = glthread->next_batch;
   }
   const int aligned_size = align(size, 8);
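
On this hot allocation path the change replaces a load of glthread->next plus
an index computation into batches[] with a single pointer load; the one
subtlety is that the local pointer must be reloaded after a flush, because
the flush advanced the ring. The same logic in the illustrative sketch
(assuming size never exceeds BATCH_BYTES):

   static struct batch *ring_alloc(struct ring *r, int size)
   {
      struct batch *next = r->next_batch;   /* one load, no indexing */
      if (next->used + size > BATCH_BYTES) {
         ring_flush(r);                     /* advances next and next_batch */
         next = r->next_batch;              /* reload the cached pointer */
      }
      next->used += size;
      return next;
   }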