util/u_queue: add an option to set the minimum thread priority
author Marek Olšák <marek.olsak@amd.com>
Wed, 31 May 2017 20:04:29 +0000 (22:04 +0200)
committer Marek Olšák <marek.olsak@amd.com>
Wed, 7 Jun 2017 16:43:42 +0000 (18:43 +0200)
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
src/gallium/auxiliary/util/u_threaded_context.c
src/gallium/drivers/freedreno/freedreno_batch.c
src/gallium/drivers/radeonsi/si_pipe.c
src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
src/util/disk_cache.c
src/util/u_queue.c
src/util/u_queue.h

index 929e186683fc3c384621eea69d5c190b847e3c7d..3038fc66830afb3fb1d80eb04782bc8583daecf3 100644 (file)
@@ -2203,7 +2203,7 @@ threaded_context_create(struct pipe_context *pipe,
     * from the queue before being executed, so keep one tc_batch slot for that
     * execution. Also, keep one unused slot for an unflushed batch.
     */
-   if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1))
+   if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1, 0))
       goto fail;
 
    for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
index 9871447497e1603b1966399a2a959daeedeca3e8..7c7a45be84dc05b813b1a4740a55621d480e5bd3 100644 (file)
@@ -276,7 +276,7 @@ batch_flush(struct fd_batch *batch)
                fd_batch_reference(&tmp, batch);
 
                if (!util_queue_is_initialized(&batch->ctx->flush_queue))
-                       util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1);
+                       util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);
 
                util_queue_add_job(&batch->ctx->flush_queue,
                                batch, &batch->flush_fence,
index 0c2f6b37dba2e65718aab81672e0123c46d01f47..47426b41da6202df2aea0554c4f2639221968d22 100644 (file)
@@ -890,7 +890,7 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
        num_compiler_threads = MIN2(num_cpus, ARRAY_SIZE(sscreen->tm));
 
        if (!util_queue_init(&sscreen->shader_compiler_queue, "si_shader",
-                            32, num_compiler_threads)) {
+                            32, num_compiler_threads, 0)) {
                si_destroy_shader_cache(sscreen);
                FREE(sscreen);
                return NULL;
index c8bd60efdce4cda352b192b983121bd3151eafff..43f2ed2fbf4f9f3cc532433f6fbf0716e5d14223 100644 (file)
@@ -305,7 +305,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
    (void) mtx_init(&ws->global_bo_list_lock, mtx_plain);
    (void) mtx_init(&ws->bo_fence_lock, mtx_plain);
 
-   if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1)) {
+   if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1, 0)) {
       amdgpu_winsys_destroy(&ws->base);
       mtx_unlock(&dev_tab_mutex);
       return NULL;
index a485615ae4fd04d1989cfda6cdc6e0844249ab4f..9bbffa514a451c2fb145b1c1417a18c5f210b8b1 100644 (file)
@@ -821,7 +821,7 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
     ws->info.gart_page_size = sysconf(_SC_PAGESIZE);
 
     if (ws->num_cpus > 1 && debug_get_option_thread())
-        util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1);
+        util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1, 0);
 
     /* Create the screen at the end. The winsys must be initialized
      * completely.
index 138d7ec174fc6a7caca51ec15f5f9686e61a44e7..b2229874e0180155c296a3639058a35135887fc8 100644 (file)
@@ -342,7 +342,7 @@ disk_cache_create(const char *gpu_name, const char *timestamp,
     * really care about getting things to disk quickly just that it's not
     * blocking other tasks.
     */
-   util_queue_init(&cache->cache_queue, "disk_cache", 32, 1);
+   util_queue_init(&cache->cache_queue, "disk_cache", 32, 1, 0);
 
    /* Create driver id keys */
    size_t ts_size = strlen(timestamp) + 1;
index 01c3a96d5f3709afd6056bad481175788fe6e8a2..94fe2202a2cff2e68b29ce310e2663dab7e709d7 100644 (file)
@@ -147,6 +147,21 @@ util_queue_thread_func(void *input)
       u_thread_setname(name);
    }
 
+   if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
+#if defined(__linux__)
+      struct sched_param sched_param = {0};
+
+      /* The nice() function can only set a maximum of 19.
+       * SCHED_IDLE is the same as nice = 20.
+       *
+       * Note that Linux only allows decreasing the priority. The original
+       * priority can't be restored.
+       */
+      pthread_setschedparam(queue->threads[thread_index], SCHED_IDLE,
+                            &sched_param);
+#endif
+   }
+
    while (1) {
       struct util_queue_job job;
 
@@ -197,13 +212,15 @@ bool
 util_queue_init(struct util_queue *queue,
                 const char *name,
                 unsigned max_jobs,
-                unsigned num_threads)
+                unsigned num_threads,
+                unsigned flags)
 {
    unsigned i;
 
    memset(queue, 0, sizeof(*queue));
    queue->name = name;
    queue->num_threads = num_threads;
+   queue->flags = flags;
    queue->max_jobs = max_jobs;
 
    queue->jobs = (struct util_queue_job*)
index 9876865c65138c1e61a0a3bfe09ef7cba0a2bc0e..916802c96d17062911f90b56fc6ffbdc025d4fea 100644 (file)
@@ -42,6 +42,8 @@
 extern "C" {
 #endif
 
+#define UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY      (1 << 0)
+
 /* Job completion fence.
  * Put this into your job structure.
  */
@@ -69,6 +71,7 @@ struct util_queue {
    thrd_t *threads;
    int num_queued;
    unsigned num_threads;
+   unsigned flags;
    int kill_threads;
    int max_jobs;
    int write_idx, read_idx; /* ring buffer pointers */
@@ -81,7 +84,8 @@ struct util_queue {
 bool util_queue_init(struct util_queue *queue,
                      const char *name,
                      unsigned max_jobs,
-                     unsigned num_threads);
+                     unsigned num_threads,
+                     unsigned flags);
 void util_queue_destroy(struct util_queue *queue);
 void util_queue_fence_init(struct util_queue_fence *fence);
 void util_queue_fence_destroy(struct util_queue_fence *fence);