gallium/util: move u_queue.{c,h} to src/util
author    Timothy Arceri <tarceri@itsqueeze.com>
          Wed, 8 Mar 2017 04:33:38 +0000 (15:33 +1100)
committer Timothy Arceri <tarceri@itsqueeze.com>
          Sun, 12 Mar 2017 06:49:03 +0000 (17:49 +1100)
This will allow us to use it outside of gallium for things like
compressing shader cache entries.

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
src/gallium/auxiliary/Makefile.sources
src/gallium/auxiliary/util/u_queue.c [deleted file]
src/gallium/auxiliary/util/u_queue.h [deleted file]
src/util/Makefile.sources
src/util/u_queue.c [new file with mode: 0644]
src/util/u_queue.h [new file with mode: 0644]

diff --git a/src/gallium/auxiliary/Makefile.sources b/src/gallium/auxiliary/Makefile.sources
index 9063b5134c3699ec2ed9851cac588467469dbfb1..c3a3af9fbca409080b73beaf7a51e38f52e27508 100644
@@ -276,8 +276,6 @@ C_SOURCES := \
        util/u_pstipple.c \
        util/u_pstipple.h \
        util/u_pwr8.h \
-       util/u_queue.c \
-       util/u_queue.h \
        util/u_range.h \
        util/u_rect.h \
        util/u_resource.c \
diff --git a/src/gallium/auxiliary/util/u_queue.c b/src/gallium/auxiliary/util/u_queue.c
deleted file mode 100644
index 0519667..0000000
--- a/src/gallium/auxiliary/util/u_queue.c
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Copyright © 2016 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
- * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- */
-
-#include "u_queue.h"
-#include "util/u_string.h"
-
-static void util_queue_killall_and_wait(struct util_queue *queue);
-
-/****************************************************************************
- * Wait for all queues to assert idle when exit() is called.
- *
- * Otherwise, C++ static variable destructors can be called while threads
- * are using the static variables.
- */
-
-static once_flag atexit_once_flag = ONCE_FLAG_INIT;
-static struct list_head queue_list;
-static mtx_t exit_mutex = _MTX_INITIALIZER_NP;
-
-static void
-atexit_handler(void)
-{
-   struct util_queue *iter;
-
-   mtx_lock(&exit_mutex);
-   /* Wait for all queues to assert idle. */
-   LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
-      util_queue_killall_and_wait(iter);
-   }
-   mtx_unlock(&exit_mutex);
-}
-
-static void
-global_init(void)
-{
-   LIST_INITHEAD(&queue_list);
-   atexit(atexit_handler);
-}
-
-static void
-add_to_atexit_list(struct util_queue *queue)
-{
-   call_once(&atexit_once_flag, global_init);
-
-   mtx_lock(&exit_mutex);
-   LIST_ADD(&queue->head, &queue_list);
-   mtx_unlock(&exit_mutex);
-}
-
-static void
-remove_from_atexit_list(struct util_queue *queue)
-{
-   struct util_queue *iter, *tmp;
-
-   mtx_lock(&exit_mutex);
-   LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
-      if (iter == queue) {
-         LIST_DEL(&iter->head);
-         break;
-      }
-   }
-   mtx_unlock(&exit_mutex);
-}
-
-/****************************************************************************
- * util_queue_fence
- */
-
-static void
-util_queue_fence_signal(struct util_queue_fence *fence)
-{
-   mtx_lock(&fence->mutex);
-   fence->signalled = true;
-   cnd_broadcast(&fence->cond);
-   mtx_unlock(&fence->mutex);
-}
-
-void
-util_queue_fence_wait(struct util_queue_fence *fence)
-{
-   mtx_lock(&fence->mutex);
-   while (!fence->signalled)
-      cnd_wait(&fence->cond, &fence->mutex);
-   mtx_unlock(&fence->mutex);
-}
-
-void
-util_queue_fence_init(struct util_queue_fence *fence)
-{
-   memset(fence, 0, sizeof(*fence));
-   (void) mtx_init(&fence->mutex, mtx_plain);
-   cnd_init(&fence->cond);
-   fence->signalled = true;
-}
-
-void
-util_queue_fence_destroy(struct util_queue_fence *fence)
-{
-   assert(fence->signalled);
-   cnd_destroy(&fence->cond);
-   mtx_destroy(&fence->mutex);
-}
-
-/****************************************************************************
- * util_queue implementation
- */
-
-struct thread_input {
-   struct util_queue *queue;
-   int thread_index;
-};
-
-static int
-util_queue_thread_func(void *input)
-{
-   struct util_queue *queue = ((struct thread_input*)input)->queue;
-   int thread_index = ((struct thread_input*)input)->thread_index;
-
-   free(input);
-
-   if (queue->name) {
-      char name[16];
-      util_snprintf(name, sizeof(name), "%s:%i", queue->name, thread_index);
-      u_thread_setname(name);
-   }
-
-   while (1) {
-      struct util_queue_job job;
-
-      mtx_lock(&queue->lock);
-      assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
-
-      /* wait if the queue is empty */
-      while (!queue->kill_threads && queue->num_queued == 0)
-         cnd_wait(&queue->has_queued_cond, &queue->lock);
-
-      if (queue->kill_threads) {
-         mtx_unlock(&queue->lock);
-         break;
-      }
-
-      job = queue->jobs[queue->read_idx];
-      memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
-      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
-
-      queue->num_queued--;
-      cnd_signal(&queue->has_space_cond);
-      mtx_unlock(&queue->lock);
-
-      if (job.job) {
-         job.execute(job.job, thread_index);
-         util_queue_fence_signal(job.fence);
-         if (job.cleanup)
-            job.cleanup(job.job, thread_index);
-      }
-   }
-
-   /* signal remaining jobs before terminating */
-   mtx_lock(&queue->lock);
-   while (queue->jobs[queue->read_idx].job) {
-      util_queue_fence_signal(queue->jobs[queue->read_idx].fence);
-
-      queue->jobs[queue->read_idx].job = NULL;
-      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
-   }
-   queue->num_queued = 0; /* reset this when exiting the thread */
-   mtx_unlock(&queue->lock);
-   return 0;
-}
-
-bool
-util_queue_init(struct util_queue *queue,
-                const char *name,
-                unsigned max_jobs,
-                unsigned num_threads)
-{
-   unsigned i;
-
-   memset(queue, 0, sizeof(*queue));
-   queue->name = name;
-   queue->num_threads = num_threads;
-   queue->max_jobs = max_jobs;
-
-   queue->jobs = (struct util_queue_job*)
-                 calloc(max_jobs, sizeof(struct util_queue_job));
-   if (!queue->jobs)
-      goto fail;
-
-   (void) mtx_init(&queue->lock, mtx_plain);
-
-   queue->num_queued = 0;
-   cnd_init(&queue->has_queued_cond);
-   cnd_init(&queue->has_space_cond);
-
-   queue->threads = (thrd_t*) calloc(num_threads, sizeof(thrd_t));
-   if (!queue->threads)
-      goto fail;
-
-   /* start threads */
-   for (i = 0; i < num_threads; i++) {
-      struct thread_input *input =
-         (struct thread_input *) malloc(sizeof(struct thread_input));
-      input->queue = queue;
-      input->thread_index = i;
-
-      queue->threads[i] = u_thread_create(util_queue_thread_func, input);
-
-      if (!queue->threads[i]) {
-         free(input);
-
-         if (i == 0) {
-            /* no threads created, fail */
-            goto fail;
-         } else {
-            /* at least one thread created, so use it */
-            queue->num_threads = i;
-            break;
-         }
-      }
-   }
-
-   add_to_atexit_list(queue);
-   return true;
-
-fail:
-   free(queue->threads);
-
-   if (queue->jobs) {
-      cnd_destroy(&queue->has_space_cond);
-      cnd_destroy(&queue->has_queued_cond);
-      mtx_destroy(&queue->lock);
-      free(queue->jobs);
-   }
-   /* also util_queue_is_initialized can be used to check for success */
-   memset(queue, 0, sizeof(*queue));
-   return false;
-}
-
-static void
-util_queue_killall_and_wait(struct util_queue *queue)
-{
-   unsigned i;
-
-   /* Signal all threads to terminate. */
-   mtx_lock(&queue->lock);
-   queue->kill_threads = 1;
-   cnd_broadcast(&queue->has_queued_cond);
-   mtx_unlock(&queue->lock);
-
-   for (i = 0; i < queue->num_threads; i++)
-      thrd_join(queue->threads[i], NULL);
-   queue->num_threads = 0;
-}
-
-void
-util_queue_destroy(struct util_queue *queue)
-{
-   util_queue_killall_and_wait(queue);
-   remove_from_atexit_list(queue);
-
-   cnd_destroy(&queue->has_space_cond);
-   cnd_destroy(&queue->has_queued_cond);
-   mtx_destroy(&queue->lock);
-   free(queue->jobs);
-   free(queue->threads);
-}
-
-void
-util_queue_add_job(struct util_queue *queue,
-                   void *job,
-                   struct util_queue_fence *fence,
-                   util_queue_execute_func execute,
-                   util_queue_execute_func cleanup)
-{
-   struct util_queue_job *ptr;
-
-   assert(fence->signalled);
-   fence->signalled = false;
-
-   mtx_lock(&queue->lock);
-   assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
-
-   /* if the queue is full, wait until there is space */
-   while (queue->num_queued == queue->max_jobs)
-      cnd_wait(&queue->has_space_cond, &queue->lock);
-
-   ptr = &queue->jobs[queue->write_idx];
-   assert(ptr->job == NULL);
-   ptr->job = job;
-   ptr->fence = fence;
-   ptr->execute = execute;
-   ptr->cleanup = cleanup;
-   queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
-
-   queue->num_queued++;
-   cnd_signal(&queue->has_queued_cond);
-   mtx_unlock(&queue->lock);
-}
-
-int64_t
-util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
-{
-   /* Allow some flexibility by not raising an error. */
-   if (thread_index >= queue->num_threads)
-      return 0;
-
-   return u_thread_get_time_nano(queue->threads[thread_index]);
-}
diff --git a/src/gallium/auxiliary/util/u_queue.h b/src/gallium/auxiliary/util/u_queue.h
deleted file mode 100644
index 0073890..0000000
--- a/src/gallium/auxiliary/util/u_queue.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright © 2016 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
- * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- */
-
-/* Job queue with execution in a separate thread.
- *
- * Jobs can be added from any thread. After that, the wait call can be used
- * to wait for completion of the job.
- */
-
-#ifndef U_QUEUE_H
-#define U_QUEUE_H
-
-#include <string.h>
-
-#include "util/list.h"
-#include "util/u_thread.h"
-
-/* Job completion fence.
- * Put this into your job structure.
- */
-struct util_queue_fence {
-   mtx_t mutex;
-   cnd_t cond;
-   int signalled;
-};
-
-typedef void (*util_queue_execute_func)(void *job, int thread_index);
-
-struct util_queue_job {
-   void *job;
-   struct util_queue_fence *fence;
-   util_queue_execute_func execute;
-   util_queue_execute_func cleanup;
-};
-
-/* Put this into your context. */
-struct util_queue {
-   const char *name;
-   mtx_t lock;
-   cnd_t has_queued_cond;
-   cnd_t has_space_cond;
-   thrd_t *threads;
-   int num_queued;
-   unsigned num_threads;
-   int kill_threads;
-   int max_jobs;
-   int write_idx, read_idx; /* ring buffer pointers */
-   struct util_queue_job *jobs;
-
-   /* for cleanup at exit(), protected by exit_mutex */
-   struct list_head head;
-};
-
-bool util_queue_init(struct util_queue *queue,
-                     const char *name,
-                     unsigned max_jobs,
-                     unsigned num_threads);
-void util_queue_destroy(struct util_queue *queue);
-void util_queue_fence_init(struct util_queue_fence *fence);
-void util_queue_fence_destroy(struct util_queue_fence *fence);
-
-/* optional cleanup callback is called after fence is signaled: */
-void util_queue_add_job(struct util_queue *queue,
-                        void *job,
-                        struct util_queue_fence *fence,
-                        util_queue_execute_func execute,
-                        util_queue_execute_func cleanup);
-
-void util_queue_fence_wait(struct util_queue_fence *fence);
-int64_t util_queue_get_thread_time_nano(struct util_queue *queue,
-                                        unsigned thread_index);
-
-/* util_queue needs to be cleared to zeroes for this to work */
-static inline bool
-util_queue_is_initialized(struct util_queue *queue)
-{
-   return queue->threads != NULL;
-}
-
-static inline bool
-util_queue_fence_is_signalled(struct util_queue_fence *fence)
-{
-   return fence->signalled != 0;
-}
-
-#endif
diff --git a/src/util/Makefile.sources b/src/util/Makefile.sources
index 8b5ee2d0c71914f5b1d2073b2d7c699db7beaa86..e6c1c7690f10d013cf6b7ceed4f5e1c6c6e9e59e 100644
@@ -44,6 +44,8 @@ MESA_UTIL_FILES :=    \
        texcompress_rgtc_tmp.h \
        u_atomic.h \
        u_endian.h \
+       u_queue.c \
+       u_queue.h \
        u_string.h \
        u_thread.h \
        u_vector.c \
diff --git a/src/util/u_queue.c b/src/util/u_queue.c
new file mode 100644
index 0000000..0519667
--- /dev/null
+++ b/src/util/u_queue.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright © 2016 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+#include "u_queue.h"
+#include "util/u_string.h"
+
+static void util_queue_killall_and_wait(struct util_queue *queue);
+
+/****************************************************************************
+ * Wait for all queues to assert idle when exit() is called.
+ *
+ * Otherwise, C++ static variable destructors can be called while threads
+ * are using the static variables.
+ */
+
+static once_flag atexit_once_flag = ONCE_FLAG_INIT;
+static struct list_head queue_list;
+static mtx_t exit_mutex = _MTX_INITIALIZER_NP;
+
+static void
+atexit_handler(void)
+{
+   struct util_queue *iter;
+
+   mtx_lock(&exit_mutex);
+   /* Wait for all queues to assert idle. */
+   LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
+      util_queue_killall_and_wait(iter);
+   }
+   mtx_unlock(&exit_mutex);
+}
+
+static void
+global_init(void)
+{
+   LIST_INITHEAD(&queue_list);
+   atexit(atexit_handler);
+}
+
+static void
+add_to_atexit_list(struct util_queue *queue)
+{
+   call_once(&atexit_once_flag, global_init);
+
+   mtx_lock(&exit_mutex);
+   LIST_ADD(&queue->head, &queue_list);
+   mtx_unlock(&exit_mutex);
+}
+
+static void
+remove_from_atexit_list(struct util_queue *queue)
+{
+   struct util_queue *iter, *tmp;
+
+   mtx_lock(&exit_mutex);
+   LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
+      if (iter == queue) {
+         LIST_DEL(&iter->head);
+         break;
+      }
+   }
+   mtx_unlock(&exit_mutex);
+}
+
+/****************************************************************************
+ * util_queue_fence
+ */
+
+static void
+util_queue_fence_signal(struct util_queue_fence *fence)
+{
+   mtx_lock(&fence->mutex);
+   fence->signalled = true;
+   cnd_broadcast(&fence->cond);
+   mtx_unlock(&fence->mutex);
+}
+
+void
+util_queue_fence_wait(struct util_queue_fence *fence)
+{
+   mtx_lock(&fence->mutex);
+   while (!fence->signalled)
+      cnd_wait(&fence->cond, &fence->mutex);
+   mtx_unlock(&fence->mutex);
+}
+
+void
+util_queue_fence_init(struct util_queue_fence *fence)
+{
+   memset(fence, 0, sizeof(*fence));
+   (void) mtx_init(&fence->mutex, mtx_plain);
+   cnd_init(&fence->cond);
+   fence->signalled = true;
+}
+
+void
+util_queue_fence_destroy(struct util_queue_fence *fence)
+{
+   assert(fence->signalled);
+   cnd_destroy(&fence->cond);
+   mtx_destroy(&fence->mutex);
+}
+
+/****************************************************************************
+ * util_queue implementation
+ */
+
+struct thread_input {
+   struct util_queue *queue;
+   int thread_index;
+};
+
+static int
+util_queue_thread_func(void *input)
+{
+   struct util_queue *queue = ((struct thread_input*)input)->queue;
+   int thread_index = ((struct thread_input*)input)->thread_index;
+
+   free(input);
+
+   if (queue->name) {
+      char name[16];
+      util_snprintf(name, sizeof(name), "%s:%i", queue->name, thread_index);
+      u_thread_setname(name);
+   }
+
+   while (1) {
+      struct util_queue_job job;
+
+      mtx_lock(&queue->lock);
+      assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
+
+      /* wait if the queue is empty */
+      while (!queue->kill_threads && queue->num_queued == 0)
+         cnd_wait(&queue->has_queued_cond, &queue->lock);
+
+      if (queue->kill_threads) {
+         mtx_unlock(&queue->lock);
+         break;
+      }
+
+      job = queue->jobs[queue->read_idx];
+      memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
+      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
+
+      queue->num_queued--;
+      cnd_signal(&queue->has_space_cond);
+      mtx_unlock(&queue->lock);
+
+      if (job.job) {
+         job.execute(job.job, thread_index);
+         util_queue_fence_signal(job.fence);
+         if (job.cleanup)
+            job.cleanup(job.job, thread_index);
+      }
+   }
+
+   /* signal remaining jobs before terminating */
+   mtx_lock(&queue->lock);
+   while (queue->jobs[queue->read_idx].job) {
+      util_queue_fence_signal(queue->jobs[queue->read_idx].fence);
+
+      queue->jobs[queue->read_idx].job = NULL;
+      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
+   }
+   queue->num_queued = 0; /* reset this when exiting the thread */
+   mtx_unlock(&queue->lock);
+   return 0;
+}
+
+bool
+util_queue_init(struct util_queue *queue,
+                const char *name,
+                unsigned max_jobs,
+                unsigned num_threads)
+{
+   unsigned i;
+
+   memset(queue, 0, sizeof(*queue));
+   queue->name = name;
+   queue->num_threads = num_threads;
+   queue->max_jobs = max_jobs;
+
+   queue->jobs = (struct util_queue_job*)
+                 calloc(max_jobs, sizeof(struct util_queue_job));
+   if (!queue->jobs)
+      goto fail;
+
+   (void) mtx_init(&queue->lock, mtx_plain);
+
+   queue->num_queued = 0;
+   cnd_init(&queue->has_queued_cond);
+   cnd_init(&queue->has_space_cond);
+
+   queue->threads = (thrd_t*) calloc(num_threads, sizeof(thrd_t));
+   if (!queue->threads)
+      goto fail;
+
+   /* start threads */
+   for (i = 0; i < num_threads; i++) {
+      struct thread_input *input =
+         (struct thread_input *) malloc(sizeof(struct thread_input));
+      input->queue = queue;
+      input->thread_index = i;
+
+      queue->threads[i] = u_thread_create(util_queue_thread_func, input);
+
+      if (!queue->threads[i]) {
+         free(input);
+
+         if (i == 0) {
+            /* no threads created, fail */
+            goto fail;
+         } else {
+            /* at least one thread created, so use it */
+            queue->num_threads = i;
+            break;
+         }
+      }
+   }
+
+   add_to_atexit_list(queue);
+   return true;
+
+fail:
+   free(queue->threads);
+
+   if (queue->jobs) {
+      cnd_destroy(&queue->has_space_cond);
+      cnd_destroy(&queue->has_queued_cond);
+      mtx_destroy(&queue->lock);
+      free(queue->jobs);
+   }
+   /* also util_queue_is_initialized can be used to check for success */
+   memset(queue, 0, sizeof(*queue));
+   return false;
+}
+
+static void
+util_queue_killall_and_wait(struct util_queue *queue)
+{
+   unsigned i;
+
+   /* Signal all threads to terminate. */
+   mtx_lock(&queue->lock);
+   queue->kill_threads = 1;
+   cnd_broadcast(&queue->has_queued_cond);
+   mtx_unlock(&queue->lock);
+
+   for (i = 0; i < queue->num_threads; i++)
+      thrd_join(queue->threads[i], NULL);
+   queue->num_threads = 0;
+}
+
+void
+util_queue_destroy(struct util_queue *queue)
+{
+   util_queue_killall_and_wait(queue);
+   remove_from_atexit_list(queue);
+
+   cnd_destroy(&queue->has_space_cond);
+   cnd_destroy(&queue->has_queued_cond);
+   mtx_destroy(&queue->lock);
+   free(queue->jobs);
+   free(queue->threads);
+}
+
+void
+util_queue_add_job(struct util_queue *queue,
+                   void *job,
+                   struct util_queue_fence *fence,
+                   util_queue_execute_func execute,
+                   util_queue_execute_func cleanup)
+{
+   struct util_queue_job *ptr;
+
+   assert(fence->signalled);
+   fence->signalled = false;
+
+   mtx_lock(&queue->lock);
+   assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
+
+   /* if the queue is full, wait until there is space */
+   while (queue->num_queued == queue->max_jobs)
+      cnd_wait(&queue->has_space_cond, &queue->lock);
+
+   ptr = &queue->jobs[queue->write_idx];
+   assert(ptr->job == NULL);
+   ptr->job = job;
+   ptr->fence = fence;
+   ptr->execute = execute;
+   ptr->cleanup = cleanup;
+   queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
+
+   queue->num_queued++;
+   cnd_signal(&queue->has_queued_cond);
+   mtx_unlock(&queue->lock);
+}
+
+int64_t
+util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
+{
+   /* Allow some flexibility by not raising an error. */
+   if (thread_index >= queue->num_threads)
+      return 0;
+
+   return u_thread_get_time_nano(queue->threads[thread_index]);
+}
diff --git a/src/util/u_queue.h b/src/util/u_queue.h
new file mode 100644
index 0000000..0073890
--- /dev/null
+++ b/src/util/u_queue.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2016 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+/* Job queue with execution in a separate thread.
+ *
+ * Jobs can be added from any thread. After that, the wait call can be used
+ * to wait for completion of the job.
+ */
+
+#ifndef U_QUEUE_H
+#define U_QUEUE_H
+
+#include <string.h>
+
+#include "util/list.h"
+#include "util/u_thread.h"
+
+/* Job completion fence.
+ * Put this into your job structure.
+ */
+struct util_queue_fence {
+   mtx_t mutex;
+   cnd_t cond;
+   int signalled;
+};
+
+typedef void (*util_queue_execute_func)(void *job, int thread_index);
+
+struct util_queue_job {
+   void *job;
+   struct util_queue_fence *fence;
+   util_queue_execute_func execute;
+   util_queue_execute_func cleanup;
+};
+
+/* Put this into your context. */
+struct util_queue {
+   const char *name;
+   mtx_t lock;
+   cnd_t has_queued_cond;
+   cnd_t has_space_cond;
+   thrd_t *threads;
+   int num_queued;
+   unsigned num_threads;
+   int kill_threads;
+   int max_jobs;
+   int write_idx, read_idx; /* ring buffer pointers */
+   struct util_queue_job *jobs;
+
+   /* for cleanup at exit(), protected by exit_mutex */
+   struct list_head head;
+};
+
+bool util_queue_init(struct util_queue *queue,
+                     const char *name,
+                     unsigned max_jobs,
+                     unsigned num_threads);
+void util_queue_destroy(struct util_queue *queue);
+void util_queue_fence_init(struct util_queue_fence *fence);
+void util_queue_fence_destroy(struct util_queue_fence *fence);
+
+/* optional cleanup callback is called after fence is signaled: */
+void util_queue_add_job(struct util_queue *queue,
+                        void *job,
+                        struct util_queue_fence *fence,
+                        util_queue_execute_func execute,
+                        util_queue_execute_func cleanup);
+
+void util_queue_fence_wait(struct util_queue_fence *fence);
+int64_t util_queue_get_thread_time_nano(struct util_queue *queue,
+                                        unsigned thread_index);
+
+/* util_queue needs to be cleared to zeroes for this to work */
+static inline bool
+util_queue_is_initialized(struct util_queue *queue)
+{
+   return queue->threads != NULL;
+}
+
+static inline bool
+util_queue_fence_is_signalled(struct util_queue_fence *fence)
+{
+   return fence->signalled != 0;
+}
+
+#endif
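
For context, here is a minimal usage sketch (not part of this commit) showing how code outside gallium, such as a hypothetical shader-cache compression path, could drive the queue through the API declared in the moved header. The compress_job structure, compress_execute callback and example_compress_entry function are illustrative names only, and the include path assumes the new src/util location; the util_queue calls follow the prototypes in u_queue.h shown above.

/* Illustrative only -- not part of this commit.  compress_job,
 * compress_execute and example_compress_entry are hypothetical names;
 * the util_queue calls match the prototypes in src/util/u_queue.h. */
#include <stddef.h>
#include "util/u_queue.h"

struct compress_job {
   struct util_queue_fence fence;   /* completion fence, embedded in the job */
   const void *input;
   size_t input_size;
};

static void
compress_execute(void *job, int thread_index)
{
   struct compress_job *c = (struct compress_job *)job;

   /* Runs on one of the queue's worker threads; compress c->input here. */
   (void)c;
   (void)thread_index;
}

static void
example_compress_entry(const void *data, size_t size)
{
   struct util_queue queue;
   struct compress_job job;

   /* One worker thread, ring buffer with room for 8 queued jobs. */
   if (!util_queue_init(&queue, "compress", 8, 1))
      return;

   job.input = data;
   job.input_size = size;
   util_queue_fence_init(&job.fence);

   /* The execute callback runs asynchronously; the fence is signalled
    * after it returns.  No cleanup callback is needed here. */
   util_queue_add_job(&queue, &job, &job.fence, compress_execute, NULL);

   /* Block until the worker has finished this job. */
   util_queue_fence_wait(&job.fence);

   util_queue_fence_destroy(&job.fence);
   util_queue_destroy(&queue);
}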