if (job.job) {
job.execute(job.job, thread_index);
util_queue_fence_signal(job.fence);
+ if (job.cleanup)
+ job.cleanup(job.job, thread_index);
}
}
util_queue_add_job(struct util_queue *queue,
void *job,
struct util_queue_fence *fence,
- util_queue_execute_func execute)
+ util_queue_execute_func execute,
+ util_queue_execute_func cleanup)
{
struct util_queue_job *ptr;
ptr->job = job;
ptr->fence = fence;
ptr->execute = execute;
+ ptr->cleanup = cleanup;
queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
queue->num_queued++;
void *job;
struct util_queue_fence *fence;
util_queue_execute_func execute;
+ util_queue_execute_func cleanup;
};
/* Put this into your context. */
void util_queue_fence_init(struct util_queue_fence *fence);
void util_queue_fence_destroy(struct util_queue_fence *fence);
+/* The optional cleanup callback is called after the fence is signaled: */
void util_queue_add_job(struct util_queue *queue,
void *job,
struct util_queue_fence *fence,
- util_queue_execute_func execute);
+ util_queue_execute_func execute,
+ util_queue_execute_func cleanup);
+
void util_queue_job_wait(struct util_queue_fence *fence);
/* util_queue needs to be cleared to zeroes for this to work */
si_init_shader_selector_async(sel, -1);
else
util_queue_add_job(&sscreen->shader_compiler_queue, sel,
- &sel->ready, si_init_shader_selector_async);
+ &sel->ready, si_init_shader_selector_async,
+ NULL);
return sel;
}
if ((flags & RADEON_FLUSH_ASYNC) &&
util_queue_is_initialized(&ws->cs_queue)) {
util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
- amdgpu_cs_submit_ib);
+ amdgpu_cs_submit_ib, NULL);
} else {
amdgpu_cs_submit_ib(cs, 0);
error_code = cs->cst->error_code;
if (util_queue_is_initialized(&cs->ws->cs_queue)) {
util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed,
- radeon_drm_cs_emit_ioctl_oneshot);
+ radeon_drm_cs_emit_ioctl_oneshot, NULL);
if (!(flags & RADEON_FLUSH_ASYNC))
radeon_drm_cs_sync_flush(rcs);
} else {