gallium/u_queue: allow the execute function to differ per job
author     Marek Olšák <marek.olsak@amd.com>
           Sat, 11 Jun 2016 15:28:52 +0000 (17:28 +0200)
committer  Marek Olšák <marek.olsak@amd.com>
           Fri, 24 Jun 2016 10:24:40 +0000 (12:24 +0200)
This allows independent types of jobs to use the same queue.

Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
src/gallium/auxiliary/util/u_queue.c
src/gallium/auxiliary/util/u_queue.h
src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
src/gallium/winsys/radeon/drm/radeon_drm_cs.c
src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
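
For context, a minimal caller-side sketch of the interface after this change. The two job types and their execute callbacks below are invented for illustration; only the util_queue_* functions are the ones this patch touches. Keeping the callback in the job rather than in the queue costs one pointer per ring slot, but lets independent job types share one queue and its worker threads.

#include "util/u_queue.h"

/* Two unrelated job types that share one queue. These structs and the
 * two execute callbacks are illustrative only; they are not part of
 * this patch. */
struct compile_job { int shader_id; };
struct flush_job   { int cs_id; };

static void run_compile(void *job, int thread_index)
{
   struct compile_job *c = job;
   /* compile the shader identified by c->shader_id here */
   (void)c;
   (void)thread_index;
}

static void run_flush(void *job, int thread_index)
{
   struct flush_job *f = job;
   /* submit the command stream identified by f->cs_id here */
   (void)f;
   (void)thread_index;
}

static void queue_two_job_types(void)
{
   struct util_queue queue;
   struct util_queue_fence compile_done, flush_done;
   struct compile_job cjob = {1};
   struct flush_job fjob = {2};

   /* The execute callback is no longer passed to util_queue_init... */
   if (!util_queue_init(&queue, "example", 8, 1))
      return;

   util_queue_fence_init(&compile_done);
   util_queue_fence_init(&flush_done);

   /* ...it is passed per job instead, so jobs of independent types can
    * go through the same queue and the same worker thread. */
   util_queue_add_job(&queue, &cjob, &compile_done, run_compile);
   util_queue_add_job(&queue, &fjob, &flush_done, run_flush);

   util_queue_job_wait(&compile_done);
   util_queue_job_wait(&flush_done);

   util_queue_fence_destroy(&compile_done);
   util_queue_fence_destroy(&flush_done);
   util_queue_destroy(&queue);
}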

diff --git a/src/gallium/auxiliary/util/u_queue.c b/src/gallium/auxiliary/util/u_queue.c
index 775cb73de437c6f731b6fdc9deec7284d71ea766..627c08a524ae09a60634ef8762cabcd0e407b7af 100644
--- a/src/gallium/auxiliary/util/u_queue.c
+++ b/src/gallium/auxiliary/util/u_queue.c
@@ -84,7 +84,7 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
       }
 
       job = queue->jobs[queue->read_idx];
-      queue->jobs[queue->read_idx].job = NULL;
+      memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
       queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
 
       queue->num_queued--;
@@ -92,7 +92,7 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
       pipe_mutex_unlock(queue->lock);
 
       if (job.job) {
-         queue->execute_job(job.job, thread_index);
+         job.execute(job.job, thread_index);
          util_queue_fence_signal(job.fence);
       }
    }
@@ -113,8 +113,7 @@ bool
 util_queue_init(struct util_queue *queue,
                 const char *name,
                 unsigned max_jobs,
-                unsigned num_threads,
-                void (*execute_job)(void *, int))
+                unsigned num_threads)
 {
    unsigned i;
 
@@ -128,7 +127,6 @@ util_queue_init(struct util_queue *queue,
    if (!queue->jobs)
       goto fail;
 
-   queue->execute_job = execute_job;
    pipe_mutex_init(queue->lock);
 
    queue->num_queued = 0;
@@ -216,7 +214,8 @@ util_queue_fence_destroy(struct util_queue_fence *fence)
 void
 util_queue_add_job(struct util_queue *queue,
                    void *job,
-                   struct util_queue_fence *fence)
+                   struct util_queue_fence *fence,
+                   util_queue_execute_func execute)
 {
    struct util_queue_job *ptr;
 
@@ -234,6 +233,7 @@ util_queue_add_job(struct util_queue *queue,
    assert(ptr->job == NULL);
    ptr->job = job;
    ptr->fence = fence;
+   ptr->execute = execute;
    queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
 
    queue->num_queued++;
diff --git a/src/gallium/auxiliary/util/u_queue.h b/src/gallium/auxiliary/util/u_queue.h
index 750327e0279142f21d244c6e3f5a79e54be1adaa..f70d6466887b9d6fc3d2e7b83d5c74f22d5fd51d 100644
--- a/src/gallium/auxiliary/util/u_queue.h
+++ b/src/gallium/auxiliary/util/u_queue.h
@@ -44,9 +44,12 @@ struct util_queue_fence {
    int signalled;
 };
 
+typedef void (*util_queue_execute_func)(void *job, int thread_index);
+
 struct util_queue_job {
    void *job;
    struct util_queue_fence *fence;
+   util_queue_execute_func execute;
 };
 
 /* Put this into your context. */
@@ -62,21 +65,20 @@ struct util_queue {
    int max_jobs;
    int write_idx, read_idx; /* ring buffer pointers */
    struct util_queue_job *jobs;
-   void (*execute_job)(void *job, int thread_index);
 };
 
 bool util_queue_init(struct util_queue *queue,
                      const char *name,
                      unsigned max_jobs,
-                     unsigned num_threads,
-                     void (*execute_job)(void *, int));
+                     unsigned num_threads);
 void util_queue_destroy(struct util_queue *queue);
 void util_queue_fence_init(struct util_queue_fence *fence);
 void util_queue_fence_destroy(struct util_queue_fence *fence);
 
 void util_queue_add_job(struct util_queue *queue,
                         void *job,
-                        struct util_queue_fence *fence);
+                        struct util_queue_fence *fence,
+                        util_queue_execute_func execute);
 void util_queue_job_wait(struct util_queue_fence *fence);
 
 /* util_queue needs to be cleared to zeroes for this to work */
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 5636f834de1963d0c3b0e6603d9c7272e8ef63e9..8c1e9fbb11f1a355a5bc05601664ce6503864b14 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -1052,7 +1052,8 @@ static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
       /* Submit. */
       if ((flags & RADEON_FLUSH_ASYNC) &&
           util_queue_is_initialized(&ws->cs_queue)) {
-         util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed);
+         util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
+                            amdgpu_cs_submit_ib);
       } else {
          amdgpu_cs_submit_ib(cs, 0);
       }
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
index 8782665cca995c04c8d5e66cf86a06d5dc3c4a2b..2a0b66d8dda2d274e906999363cdde65f2c1ae45 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -493,7 +493,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
    pipe_mutex_init(ws->bo_fence_lock);
 
    if (sysconf(_SC_NPROCESSORS_ONLN) > 1 && debug_get_option_thread())
-      util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1, amdgpu_cs_submit_ib);
+      util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1);
 
    /* Create the screen at the end. The winsys must be initialized
     * completely.
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
index 9532a6a0f0f904efa8720039d240ad14dc5bea4e..efefd7517e78ee60b0fe7a79ae7cd8f7188256c5 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
@@ -586,7 +586,8 @@ static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
         }
 
         if (util_queue_is_initialized(&cs->ws->cs_queue)) {
-            util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed);
+            util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed,
+                               radeon_drm_cs_emit_ioctl_oneshot);
             if (!(flags & RADEON_FLUSH_ASYNC))
                 radeon_drm_cs_sync_flush(rcs);
         } else {
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index ea5d212803cb9932aa973015747088bd192cd2bb..f5f9d420722ebb88a7a5e921eb6a09d5755b0a12 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -783,8 +783,7 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
     ws->info.gart_page_size = sysconf(_SC_PAGESIZE);
 
     if (ws->num_cpus > 1 && debug_get_option_thread())
-        util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1,
-                        radeon_drm_cs_emit_ioctl_oneshot);
+        util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1);
 
     /* Create the screen at the end. The winsys must be initialized
      * completely.