}
util_queue_add_job(&tc->queue, next, &next->fence, tc_batch_execute,
- NULL);
+ NULL, 0);
tc->last = tc->next;
tc->next = (tc->next + 1) % TC_MAX_BATCHES;
}
util_queue_add_job(&batch->ctx->flush_queue,
batch, &batch->flush_fence,
- batch_flush_func, batch_cleanup_func);
+ batch_flush_func, batch_cleanup_func, 0);
} else {
fd_gmem_render_tiles(batch);
batch_reset_resources(batch);
/* Compile it asynchronously. */
util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
shader, &shader->ready,
- si_build_shader_variant_low_priority, NULL);
+ si_build_shader_variant_low_priority, NULL,
+ 0);
/* Add only after the ready fence was reset, to guard against a
* race with si_bind_XX_shader. */
}
util_queue_add_job(&sctx->screen->shader_compiler_queue, job,
- ready_fence, execute, NULL);
+ ready_fence, execute, NULL, 0);
if (debug) {
util_queue_fence_wait(ready_fence);
/* Submit. */
util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
- amdgpu_cs_submit_ib, NULL);
+ amdgpu_cs_submit_ib, NULL, 0);
/* The submission has been queued, unlock the fence now. */
simple_mtx_unlock(&ws->bo_fence_lock);
if (util_queue_is_initialized(&cs->ws->cs_queue)) {
util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed,
- radeon_drm_cs_emit_ioctl_oneshot, NULL);
+ radeon_drm_cs_emit_ioctl_oneshot, NULL, 0);
if (!(flags & PIPE_FLUSH_ASYNC))
radeon_drm_cs_sync_flush(rcs);
} else {
struct util_queue_fence fence;
util_queue_fence_init(&fence);
util_queue_add_job(&glthread->queue, ctx, &fence,
- glthread_thread_initialization, NULL);
+ glthread_thread_initialization, NULL, 0);
util_queue_fence_wait(&fence);
util_queue_fence_destroy(&fence);
}
p_atomic_add(&glthread->stats.num_offloaded_items, next->used);
util_queue_add_job(&glthread->queue, next, &next->fence,
- glthread_unmarshal_batch, NULL);
+ glthread_unmarshal_batch, NULL, 0);
glthread->last = glthread->next;
glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
}
if (dc_job) {
util_queue_fence_init(&dc_job->fence);
util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
- cache_put, destroy_put_job);
+ cache_put, destroy_put_job, 0);
}
}
#include "util/u_thread.h"
#include "u_process.h"
+/* Stop growing a full queue once the queued jobs hold 256 MB. */
+#define S_256MB (256 * 1024 * 1024)
+
static void
util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
bool finish_locked);
util_queue_fence_signal(job.fence);
if (job.cleanup)
job.cleanup(job.job, thread_index);
+
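+ /* The job has finished; stop counting its size against the queue's budget. */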
+ queue->total_jobs_size -= job.job_size;
}
}
void *job,
struct util_queue_fence *fence,
util_queue_execute_func execute,
- util_queue_execute_func cleanup)
+ util_queue_execute_func cleanup,
+ const size_t job_size)
{
struct util_queue_job *ptr;
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
if (queue->num_queued == queue->max_jobs) {
- if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL) {
+ if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL &&
+ queue->total_jobs_size + job_size < S_256MB) {
/* If the queue is full, make it larger to avoid waiting for a free
 * slot, as long as the total size of queued jobs stays below S_256MB.
 */
ptr->fence = fence;
ptr->execute = execute;
ptr->cleanup = cleanup;
+ ptr->job_size = job_size;
+
queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
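+ /* Account for the job's memory until a worker thread has executed it. */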
+ queue->total_jobs_size += ptr->job_size;
queue->num_queued++;
cnd_signal(&queue->has_queued_cond);
for (unsigned i = 0; i < queue->num_threads; ++i) {
util_queue_fence_init(&fences[i]);
- util_queue_add_job(queue, &barrier, &fences[i], util_queue_finish_execute, NULL);
+ util_queue_add_job(queue, &barrier, &fences[i],
+ util_queue_finish_execute, NULL, 0);
}
for (unsigned i = 0; i < queue->num_threads; ++i) {
struct util_queue_job {
void *job;
+ size_t job_size;
struct util_queue_fence *fence;
util_queue_execute_func execute;
util_queue_execute_func cleanup;
unsigned num_threads; /* decreasing this number will terminate threads */
int max_jobs;
int write_idx, read_idx; /* ring buffer pointers */
+ size_t total_jobs_size; /* memory use of all jobs in the queue */
struct util_queue_job *jobs;
/* for cleanup at exit(), protected by exit_mutex */
void *job,
struct util_queue_fence *fence,
util_queue_execute_func execute,
- util_queue_execute_func cleanup);
+ util_queue_execute_func cleanup,
+ const size_t job_size);
void util_queue_drop_job(struct util_queue *queue,
struct util_queue_fence *fence);
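For the new size accounting to have any effect, a caller should pass the amount of memory the job keeps alive until it executes (the callers updated above all pass 0, i.e. unaccounted). Below is a minimal sketch of such a caller; the upload_job struct, its callbacks, and queue_upload are hypothetical, and only the util_queue calls are the API touched by this patch:

#include <stdlib.h>
#include "util/u_queue.h"

/* Hypothetical job that owns a heap buffer until a worker thread consumes it. */
struct upload_job {
   struct util_queue_fence fence;
   void *data;
   size_t size;
};

static void
upload_execute(void *job, int thread_index)
{
   struct upload_job *u = job;
   (void)thread_index;
   /* ... consume u->data on the worker thread ... */
}

static void
upload_cleanup(void *job, int thread_index)
{
   struct upload_job *u = job;
   (void)thread_index;
   free(u->data); /* the buffer is no longer needed once the job has run */
}

static void
queue_upload(struct util_queue *queue, struct upload_job *u)
{
   util_queue_fence_init(&u->fence);
   /* Passing u->size lets a RESIZE_IF_FULL queue stop growing once the
    * pending jobs pin about 256 MB, and wait for a free slot instead. */
   util_queue_add_job(queue, u, &u->fence,
                      upload_execute, upload_cleanup, u->size);
}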