return NULL;
}
- pipe_semaphore_init(&cs->flush_completed, 1);
+ util_queue_fence_init(&cs->flush_completed);
cs->ctx = ctx;
cs->flush_cs = flush;
}
}
-void amdgpu_cs_submit_ib(struct amdgpu_cs *acs)
+void amdgpu_cs_submit_ib(void *job, int thread_index)
{
+ struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
struct amdgpu_winsys *ws = acs->ctx->ws;
struct amdgpu_cs_context *cs = acs->cst;
int i, r;
+ /* Block until any queued (asynchronous) flush of this CS has finished
+  * executing on the winsys queue thread.  Replaces the old busy/idle
+  * semaphore handshake with a wait on the CS's util_queue fence. */
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
+ struct amdgpu_winsys *ws = cs->ctx->ws;
/* Wait for any pending ioctl of this CS to complete. */
- if (cs->ctx->ws->thread) {
- /* wait and set the semaphore to "busy" */
- pipe_semaphore_wait(&cs->flush_completed);
- /* set the semaphore to "idle" */
- pipe_semaphore_signal(&cs->flush_completed);
- }
+ if (util_queue_is_initialized(&ws->cs_queue))
+ util_queue_job_wait(&cs->flush_completed);
+ /* NOTE(review): if the queue was never initialized, no async submit can
+  * have been queued (the flush path falls back to synchronous submit),
+  * so skipping the wait is safe — confirm against the flush function. */
}
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
cs->cst = cur;
/* Submit. */
- if (ws->thread && (flags & RADEON_FLUSH_ASYNC)) {
- /* Set the semaphore to "busy". */
- pipe_semaphore_wait(&cs->flush_completed);
- amdgpu_ws_queue_cs(ws, cs);
+ if ((flags & RADEON_FLUSH_ASYNC) &&
+ util_queue_is_initialized(&ws->cs_queue)) {
+ util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
+ amdgpu_cs_submit_ib);
} else {
- amdgpu_cs_submit_ib(cs);
+ amdgpu_cs_submit_ib(cs, 0);
}
} else {
amdgpu_cs_context_cleanup(cs->csc);
struct amdgpu_cs *cs = amdgpu_cs(rcs);
amdgpu_cs_sync_flush(rcs);
- pipe_semaphore_destroy(&cs->flush_completed);
+ util_queue_fence_destroy(&cs->flush_completed);
p_atomic_dec(&cs->ctx->ws->num_cs);
pb_reference(&cs->main.big_ib_buffer, NULL);
FREE(cs->main.base.prev);