* from the queue before being executed, so keep one tc_batch slot for that
* execution. Also, keep one unused slot for an unflushed batch.
*/
- if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1))
+ if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1, 0))
goto fail;
for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
fd_batch_reference(&tmp, batch);
if (!util_queue_is_initialized(&batch->ctx->flush_queue))
- util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1);
+ util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);
util_queue_add_job(&batch->ctx->flush_queue,
batch, &batch->flush_fence,
num_compiler_threads = MIN2(num_cpus, ARRAY_SIZE(sscreen->tm));
if (!util_queue_init(&sscreen->shader_compiler_queue, "si_shader",
- 32, num_compiler_threads)) {
+ 32, num_compiler_threads, 0)) {
si_destroy_shader_cache(sscreen);
FREE(sscreen);
return NULL;
(void) mtx_init(&ws->global_bo_list_lock, mtx_plain);
(void) mtx_init(&ws->bo_fence_lock, mtx_plain);
- if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1)) {
+ if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1, 0)) {
amdgpu_winsys_destroy(&ws->base);
mtx_unlock(&dev_tab_mutex);
return NULL;
ws->info.gart_page_size = sysconf(_SC_PAGESIZE);
if (ws->num_cpus > 1 && debug_get_option_thread())
- util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1);
+ util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1, 0);
/* Create the screen at the end. The winsys must be initialized
* completely.
 * really care about getting things to disk quickly, just that it's not
* blocking other tasks.
*/
- util_queue_init(&cache->cache_queue, "disk_cache", 32, 1);
+ util_queue_init(&cache->cache_queue, "disk_cache", 32, 1, 0);
/* Create driver id keys */
size_t ts_size = strlen(timestamp) + 1;
u_thread_setname(name);
}
+ if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
+#if defined(__linux__)
+ struct sched_param sched_param = {0};
+
+ /* The nice() function can only set a maximum nice value of 19.
+ * SCHED_IDLE is the same as nice = 20.
+ *
+ * Note that Linux only allows decreasing the priority. The original
+ * priority can't be restored.
+ */
+ pthread_setschedparam(queue->threads[thread_index], SCHED_IDLE,
+ &sched_param);
+#endif
+ }
+
while (1) {
struct util_queue_job job;
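For reference, the standalone equivalent of the priority drop added above, applied to the calling thread itself, is sketched below. This is illustrative only and not part of the patch; it assumes Linux with glibc, since SCHED_IDLE is Linux-specific, and the helper name is made up.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Demote the calling thread to SCHED_IDLE. Such a thread is only scheduled
 * when no other runnable thread wants the CPU, and (as the comment above
 * notes) the original priority cannot be restored afterwards. */
static int demote_self_to_idle(void)
{
   struct sched_param param = {0}; /* SCHED_IDLE requires sched_priority = 0 */
   return pthread_setschedparam(pthread_self(), SCHED_IDLE, &param);
}

int main(void)
{
   if (demote_self_to_idle() != 0)
      fprintf(stderr, "failed to switch to SCHED_IDLE\n");
   /* ... low-priority background work would run here ... */
   return 0;
}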
util_queue_init(struct util_queue *queue,
const char *name,
unsigned max_jobs,
- unsigned num_threads)
+ unsigned num_threads,
+ unsigned flags)
{
unsigned i;
memset(queue, 0, sizeof(*queue));
queue->name = name;
queue->num_threads = num_threads;
+ queue->flags = flags;
queue->max_jobs = max_jobs;
queue->jobs = (struct util_queue_job*)
extern "C" {
#endif
+#define UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY (1 << 0)
+
/* Job completion fence.
* Put this into your job structure.
*/
thrd_t *threads;
int num_queued;
unsigned num_threads;
+ unsigned flags;
int kill_threads;
int max_jobs;
int write_idx, read_idx; /* ring buffer pointers */
bool util_queue_init(struct util_queue *queue,
const char *name,
unsigned max_jobs,
- unsigned num_threads);
+ unsigned num_threads,
+ unsigned flags);
void util_queue_destroy(struct util_queue *queue);
void util_queue_fence_init(struct util_queue_fence *fence);
void util_queue_fence_destroy(struct util_queue_fence *fence);
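With the signature change above, existing callers simply pass 0 for the new flags argument, as the hunks show. A caller that does want its worker threads demoted to the lowest priority would pass the new flag instead; a minimal sketch (the function name, queue name, and sizes here are made up for illustration):

#include <stdbool.h>
#include "util/u_queue.h"

static bool init_background_queue(struct util_queue *queue)
{
   /* One worker thread, up to 32 queued jobs; on Linux the worker is
    * switched to SCHED_IDLE because of the flag. */
   return util_queue_init(queue, "background", 32, 1,
                          UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY);
}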