gallium/util: replace pipe_mutex_unlock() with mtx_unlock()
src/gallium/auxiliary/util/u_queue.c
/*
 * Copyright © 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "u_queue.h"
#include "u_memory.h"
#include "u_string.h"
#include "os/os_time.h"

static void util_queue_killall_and_wait(struct util_queue *queue);

/****************************************************************************
 * Wait for all queues to assert idle when exit() is called.
 *
 * Otherwise, C++ static variable destructors can be called while threads
 * are using the static variables.
 */

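/* exit_mutex guards queue_list, which links every live queue so that the
 * atexit handler can wait for all of them. */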
static once_flag atexit_once_flag = ONCE_FLAG_INIT;
static struct list_head queue_list;
static mtx_t exit_mutex = _MTX_INITIALIZER_NP;

static void
atexit_handler(void)
{
   struct util_queue *iter;

   mtx_lock(&exit_mutex);
   /* Wait for all queues to assert idle. */
   LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
      util_queue_killall_and_wait(iter);
   }
   mtx_unlock(&exit_mutex);
}

static void
global_init(void)
{
   LIST_INITHEAD(&queue_list);
   atexit(atexit_handler);
}

static void
add_to_atexit_list(struct util_queue *queue)
{
   call_once(&atexit_once_flag, global_init);

   mtx_lock(&exit_mutex);
   LIST_ADD(&queue->head, &queue_list);
   mtx_unlock(&exit_mutex);
}

static void
remove_from_atexit_list(struct util_queue *queue)
{
   struct util_queue *iter, *tmp;

   mtx_lock(&exit_mutex);
   LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
      if (iter == queue) {
         LIST_DEL(&iter->head);
         break;
      }
   }
   mtx_unlock(&exit_mutex);
}

/****************************************************************************
 * util_queue_fence
 */

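/* A fence is a "signalled" flag protected by a mutex/condvar pair.
 * util_queue_add_job() clears the flag, the worker thread that runs the
 * job sets it again, and util_queue_fence_wait() sleeps on the condvar
 * until then. A freshly initialized fence starts out signalled, i.e. idle. */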
static void
util_queue_fence_signal(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   fence->signalled = true;
   cnd_broadcast(&fence->cond);
   mtx_unlock(&fence->mutex);
}

void
util_queue_fence_wait(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   while (!fence->signalled)
      cnd_wait(&fence->cond, &fence->mutex);
   mtx_unlock(&fence->mutex);
}

void
util_queue_fence_init(struct util_queue_fence *fence)
{
   memset(fence, 0, sizeof(*fence));
   (void) mtx_init(&fence->mutex, mtx_plain);
   cnd_init(&fence->cond);
   fence->signalled = true;
}

void
util_queue_fence_destroy(struct util_queue_fence *fence)
{
   assert(fence->signalled);
   cnd_destroy(&fence->cond);
   mtx_destroy(&fence->mutex);
}

/****************************************************************************
 * util_queue implementation
 */

struct thread_input {
   struct util_queue *queue;
   int thread_index;
};

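/* Worker thread main loop: pop one job at a time from the ring buffer at
 * read_idx and run it outside the queue lock, so new jobs can be queued
 * concurrently. When kill_threads is set, the loop exits and any jobs
 * still in the ring get their fences signalled without being executed. */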
static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
{
   struct util_queue *queue = ((struct thread_input*)input)->queue;
   int thread_index = ((struct thread_input*)input)->thread_index;

   FREE(input);

   if (queue->name) {
      char name[16];
      util_snprintf(name, sizeof(name), "%s:%i", queue->name, thread_index);
      pipe_thread_setname(name);
   }

   while (1) {
      struct util_queue_job job;

      mtx_lock(&queue->lock);
      assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

      /* wait if the queue is empty */
      while (!queue->kill_threads && queue->num_queued == 0)
         cnd_wait(&queue->has_queued_cond, &queue->lock);

      if (queue->kill_threads) {
         mtx_unlock(&queue->lock);
         break;
      }

      job = queue->jobs[queue->read_idx];
      memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;

      queue->num_queued--;
      cnd_signal(&queue->has_space_cond);
      mtx_unlock(&queue->lock);

      if (job.job) {
         job.execute(job.job, thread_index);
         util_queue_fence_signal(job.fence);
         if (job.cleanup)
            job.cleanup(job.job, thread_index);
      }
   }

   /* signal remaining jobs before terminating */
   mtx_lock(&queue->lock);
   while (queue->jobs[queue->read_idx].job) {
      util_queue_fence_signal(queue->jobs[queue->read_idx].fence);

      queue->jobs[queue->read_idx].job = NULL;
      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
   }
   queue->num_queued = 0; /* reset this when exiting the thread */
   mtx_unlock(&queue->lock);
   return 0;
}

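/* Create a queue with a ring buffer of max_jobs entries, serviced by
 * num_threads worker threads. "name" is stored by pointer, not copied,
 * and is used to name the threads. Returns false on failure and leaves
 * the queue zeroed; if only some of the threads could be created, the
 * queue keeps running with those.
 *
 * Typical usage, as an illustrative sketch ("job", "execute_job" and the
 * sizes are hypothetical, not defined in this file):
 *
 *    struct util_queue queue;
 *    struct util_queue_fence fence;
 *
 *    if (!util_queue_init(&queue, "compile", 32, 4))
 *       return;
 *    util_queue_fence_init(&fence);
 *    util_queue_add_job(&queue, job, &fence, execute_job, NULL);
 *    ...
 *    util_queue_fence_wait(&fence);    - blocks until execute_job has run
 *    util_queue_fence_destroy(&fence);
 *    util_queue_destroy(&queue);
 */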
bool
util_queue_init(struct util_queue *queue,
                const char *name,
                unsigned max_jobs,
                unsigned num_threads)
{
   unsigned i;

   memset(queue, 0, sizeof(*queue));
   queue->name = name;
   queue->num_threads = num_threads;
   queue->max_jobs = max_jobs;

   queue->jobs = (struct util_queue_job*)
                 CALLOC(max_jobs, sizeof(struct util_queue_job));
   if (!queue->jobs)
      goto fail;

   (void) mtx_init(&queue->lock, mtx_plain);

   queue->num_queued = 0;
   cnd_init(&queue->has_queued_cond);
   cnd_init(&queue->has_space_cond);

   queue->threads = (pipe_thread*)CALLOC(num_threads, sizeof(pipe_thread));
   if (!queue->threads)
      goto fail;

   /* start threads */
   for (i = 0; i < num_threads; i++) {
      struct thread_input *input = MALLOC_STRUCT(thread_input);

      /* guard against allocation failure before dereferencing "input" */
      if (!input) {
         if (i == 0)
            goto fail;
         /* keep the threads created so far */
         queue->num_threads = i;
         break;
      }

      input->queue = queue;
      input->thread_index = i;

      queue->threads[i] = pipe_thread_create(util_queue_thread_func, input);

      if (!queue->threads[i]) {
         FREE(input);

         if (i == 0) {
            /* no threads created, fail */
            goto fail;
         } else {
            /* at least one thread created, so use it */
            queue->num_threads = i;
            break;
         }
      }
   }

   add_to_atexit_list(queue);
   return true;

fail:
   FREE(queue->threads);

   if (queue->jobs) {
      cnd_destroy(&queue->has_space_cond);
      cnd_destroy(&queue->has_queued_cond);
      mtx_destroy(&queue->lock);
      FREE(queue->jobs);
   }
   /* zero the queue, so that util_queue_is_initialized() can also be used
    * to check for success */
   memset(queue, 0, sizeof(*queue));
   return false;
}

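/* Ask all worker threads to terminate and join them. Jobs still sitting
 * in the ring buffer are not executed; the exiting threads signal their
 * fences so that no waiter blocks forever. */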
static void
util_queue_killall_and_wait(struct util_queue *queue)
{
   unsigned i;

   /* Signal all threads to terminate. */
   mtx_lock(&queue->lock);
   queue->kill_threads = 1;
   cnd_broadcast(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);

   for (i = 0; i < queue->num_threads; i++)
      pipe_thread_wait(queue->threads[i]);
   queue->num_threads = 0;
}

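/* Tear the queue down: stop the workers (pending jobs are signalled, not
 * executed), unregister from the atexit list and free all resources owned
 * by the queue. */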
void
util_queue_destroy(struct util_queue *queue)
{
   util_queue_killall_and_wait(queue);
   remove_from_atexit_list(queue);

   cnd_destroy(&queue->has_space_cond);
   cnd_destroy(&queue->has_queued_cond);
   mtx_destroy(&queue->lock);
   FREE(queue->jobs);
   FREE(queue->threads);
}

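/* Queue a new job. The fence must be initialized and signalled (idle);
 * it is cleared here and signalled again by the worker that executes the
 * job. If the ring buffer is full, this blocks until a slot frees up.
 * "cleanup" may be NULL; when set, it runs after the fence is signalled,
 * e.g. to free the job structure. */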
void
util_queue_add_job(struct util_queue *queue,
                   void *job,
                   struct util_queue_fence *fence,
                   util_queue_execute_func execute,
                   util_queue_execute_func cleanup)
{
   struct util_queue_job *ptr;

   assert(fence->signalled);
   fence->signalled = false;

   mtx_lock(&queue->lock);
   assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

   /* if the queue is full, wait until there is space */
   while (queue->num_queued == queue->max_jobs)
      cnd_wait(&queue->has_space_cond, &queue->lock);

   ptr = &queue->jobs[queue->write_idx];
   assert(ptr->job == NULL);
   ptr->job = job;
   ptr->fence = fence;
   ptr->execute = execute;
   ptr->cleanup = cleanup;
   queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;

   queue->num_queued++;
   cnd_signal(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);
}

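/* Return the time consumed so far by the given worker thread, as reported
 * by pipe_thread_get_time_nano(), or 0 if thread_index is out of range. */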
int64_t
util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
{
   /* Allow some flexibility by not raising an error. */
   if (thread_index >= queue->num_threads)
      return 0;

   return pipe_thread_get_time_nano(queue->threads[thread_index]);
}