util: fix compilation on macos
src/util/u_queue.c
/*
 * Copyright © 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "u_queue.h"

#include "c11/threads.h"

#include "util/os_time.h"
#include "util/u_string.h"
#include "util/u_thread.h"
#include "u_process.h"

static void
util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
                        bool finish_locked);

/****************************************************************************
 * Wait for all queues to assert idle when exit() is called.
 *
 * Otherwise, C++ static variable destructors can be called while threads
 * are using the static variables.
 */

static once_flag atexit_once_flag = ONCE_FLAG_INIT;
static struct list_head queue_list;
static mtx_t exit_mutex = _MTX_INITIALIZER_NP;

static void
atexit_handler(void)
{
   struct util_queue *iter;

   mtx_lock(&exit_mutex);
   /* Wait for all queues to assert idle. */
   LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
      util_queue_kill_threads(iter, 0, false);
   }
   mtx_unlock(&exit_mutex);
}

static void
global_init(void)
{
   LIST_INITHEAD(&queue_list);
   atexit(atexit_handler);
}

static void
add_to_atexit_list(struct util_queue *queue)
{
   call_once(&atexit_once_flag, global_init);

   mtx_lock(&exit_mutex);
   LIST_ADD(&queue->head, &queue_list);
   mtx_unlock(&exit_mutex);
}

static void
remove_from_atexit_list(struct util_queue *queue)
{
   struct util_queue *iter, *tmp;

   mtx_lock(&exit_mutex);
   LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
      if (iter == queue) {
         LIST_DEL(&iter->head);
         break;
      }
   }
   mtx_unlock(&exit_mutex);
}

/****************************************************************************
 * util_queue_fence
 */

#ifdef UTIL_QUEUE_FENCE_FUTEX
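/* Futex-based fence: fence->val is 0 when signalled, 1 when unsignalled,
 * and 2 when unsignalled with potential waiters. A waiter upgrades 1 -> 2
 * before sleeping to mark that a futex wake is needed.
 */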
static bool
do_futex_fence_wait(struct util_queue_fence *fence,
                    bool timeout, int64_t abs_timeout)
{
   uint32_t v = fence->val;
   struct timespec ts;
   ts.tv_sec = abs_timeout / (1000*1000*1000);
   ts.tv_nsec = abs_timeout % (1000*1000*1000);

   while (v != 0) {
      if (v != 2) {
         v = p_atomic_cmpxchg(&fence->val, 1, 2);
         if (v == 0)
            return true;
      }

      int r = futex_wait(&fence->val, 2, timeout ? &ts : NULL);
      if (timeout && r < 0) {
         if (errno == ETIMEDOUT)
            return false;
      }

      v = fence->val;
   }

   return true;
}

void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   do_futex_fence_wait(fence, false, 0);
}

bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   return do_futex_fence_wait(fence, true, abs_timeout);
}

#endif

#ifdef UTIL_QUEUE_FENCE_STANDARD
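/* Fallback fence implementation for platforms without futexes: a signalled
 * flag protected by a mutex and condition variable.
 */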
void
util_queue_fence_signal(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   fence->signalled = true;
   cnd_broadcast(&fence->cond);
   mtx_unlock(&fence->mutex);
}

void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   while (!fence->signalled)
      cnd_wait(&fence->cond, &fence->mutex);
   mtx_unlock(&fence->mutex);
}

bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   /* This terrible hack is made necessary by the fact that we really want an
    * internal interface consistent with os_time_*, but cnd_timedwait is spec'd
    * to be relative to the TIME_UTC clock.
    */
   int64_t rel = abs_timeout - os_time_get_nano();

   if (rel > 0) {
      struct timespec ts;

      timespec_get(&ts, TIME_UTC);

      /* Add the remaining time (not the absolute os_time timestamp) to the
       * current TIME_UTC time to get the deadline for cnd_timedwait.
       */
      ts.tv_sec += rel / (1000*1000*1000);
      ts.tv_nsec += rel % (1000*1000*1000);
      if (ts.tv_nsec >= (1000*1000*1000)) {
         ts.tv_sec++;
         ts.tv_nsec -= (1000*1000*1000);
      }

      mtx_lock(&fence->mutex);
      while (!fence->signalled) {
         if (cnd_timedwait(&fence->cond, &fence->mutex, &ts) != thrd_success)
            break;
      }
      mtx_unlock(&fence->mutex);
   }

   return fence->signalled;
}

void
util_queue_fence_init(struct util_queue_fence *fence)
{
   memset(fence, 0, sizeof(*fence));
   (void) mtx_init(&fence->mutex, mtx_plain);
   cnd_init(&fence->cond);
   fence->signalled = true;
}

void
util_queue_fence_destroy(struct util_queue_fence *fence)
{
   assert(fence->signalled);

   /* Ensure that another thread is not in the middle of
    * util_queue_fence_signal (having set the fence to signalled but still
    * holding the fence mutex).
    *
    * A common contract between threads is that as soon as a fence is signalled
    * by thread A, thread B is allowed to destroy it. Since
    * util_queue_fence_is_signalled does not lock the fence mutex (for
    * performance reasons), we must do so here.
    */
   mtx_lock(&fence->mutex);
   mtx_unlock(&fence->mutex);

   cnd_destroy(&fence->cond);
   mtx_destroy(&fence->mutex);
}
#endif

/****************************************************************************
 * util_queue implementation
 */

struct thread_input {
   struct util_queue *queue;
   int thread_index;
};

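/* Worker thread entry point. Each thread pops jobs from the ring buffer and
 * executes them until its index is no longer below queue->num_threads, which
 * is how util_queue_kill_threads asks threads to exit.
 */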
static int
util_queue_thread_func(void *input)
{
   struct util_queue *queue = ((struct thread_input*)input)->queue;
   int thread_index = ((struct thread_input*)input)->thread_index;

   free(input);

#ifdef HAVE_PTHREAD_SETAFFINITY
   if (queue->flags & UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY) {
      /* Don't inherit the thread affinity from the parent thread.
       * Set the full mask.
       */
      cpu_set_t cpuset;
      CPU_ZERO(&cpuset);
      for (unsigned i = 0; i < CPU_SETSIZE; i++)
         CPU_SET(i, &cpuset);

      pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
   }
#endif

   if (strlen(queue->name) > 0) {
      char name[16];
      snprintf(name, sizeof(name), "%s%i", queue->name, thread_index);
      u_thread_setname(name);
   }

   while (1) {
      struct util_queue_job job;

      mtx_lock(&queue->lock);
      assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

      /* wait if the queue is empty */
      while (thread_index < queue->num_threads && queue->num_queued == 0)
         cnd_wait(&queue->has_queued_cond, &queue->lock);

      /* only kill threads that are above "num_threads" */
      if (thread_index >= queue->num_threads) {
         mtx_unlock(&queue->lock);
         break;
      }

      job = queue->jobs[queue->read_idx];
      memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;

      queue->num_queued--;
      cnd_signal(&queue->has_space_cond);
      mtx_unlock(&queue->lock);

      if (job.job) {
         job.execute(job.job, thread_index);
         util_queue_fence_signal(job.fence);
         if (job.cleanup)
            job.cleanup(job.job, thread_index);
      }
   }

   /* signal remaining jobs if all threads are being terminated */
   mtx_lock(&queue->lock);
   if (queue->num_threads == 0) {
      for (unsigned i = queue->read_idx; i != queue->write_idx;
           i = (i + 1) % queue->max_jobs) {
         if (queue->jobs[i].job) {
            util_queue_fence_signal(queue->jobs[i].fence);
            queue->jobs[i].job = NULL;
         }
      }
      queue->read_idx = queue->write_idx;
      queue->num_queued = 0;
   }
   mtx_unlock(&queue->lock);
   return 0;
}

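/* Spawn the worker thread with the given index. Returns false on allocation
 * or thread-creation failure so callers can fall back to fewer threads.
 */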
static bool
util_queue_create_thread(struct util_queue *queue, unsigned index)
{
   struct thread_input *input =
      (struct thread_input *) malloc(sizeof(struct thread_input));
   if (!input)
      return false;

   input->queue = queue;
   input->thread_index = index;

   queue->threads[index] = u_thread_create(util_queue_thread_func, input);

   if (!queue->threads[index]) {
      free(input);
      return false;
   }

   if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
#if defined(__linux__) && defined(SCHED_IDLE)
      struct sched_param sched_param = {0};

      /* The nice() function can only set a maximum of 19.
       * SCHED_IDLE is the same as nice = 20.
       *
       * Note that Linux only allows decreasing the priority. The original
       * priority can't be restored.
       */
      pthread_setschedparam(queue->threads[index], SCHED_IDLE, &sched_param);
#endif
   }
   return true;
}

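/* Resize the thread pool at runtime. The requested count is clamped to
 * [1, max_threads]; shrinking joins the excess threads, growing spawns
 * new ones.
 */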
void
util_queue_adjust_num_threads(struct util_queue *queue, unsigned num_threads)
{
   num_threads = MIN2(num_threads, queue->max_threads);
   num_threads = MAX2(num_threads, 1);

   mtx_lock(&queue->finish_lock);
   unsigned old_num_threads = queue->num_threads;

   if (num_threads == old_num_threads) {
      mtx_unlock(&queue->finish_lock);
      return;
   }

   if (num_threads < old_num_threads) {
      util_queue_kill_threads(queue, num_threads, true);
      mtx_unlock(&queue->finish_lock);
      return;
   }

   /* Create threads.
    *
    * We need to update num_threads first, because a newly created thread
    * exits immediately if it sees thread_index >= num_threads.
    */
   queue->num_threads = num_threads;
   for (unsigned i = old_num_threads; i < num_threads; i++) {
      if (!util_queue_create_thread(queue, i)) {
         /* Keep num_threads in sync with the threads actually created. */
         queue->num_threads = i;
         break;
      }
   }
   mtx_unlock(&queue->finish_lock);
}

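/* Initialize a queue with a ring buffer of max_jobs entries serviced by
 * num_threads worker threads. Returns false (and zeroes *queue) if an
 * allocation fails or no thread could be created.
 *
 * Illustrative usage (names chosen for the example only):
 *
 *    struct util_queue queue;
 *    if (!util_queue_init(&queue, "shcomp", 32, 4, 0))
 *       return false;
 */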
bool
util_queue_init(struct util_queue *queue,
                const char *name,
                unsigned max_jobs,
                unsigned num_threads,
                unsigned flags)
{
   unsigned i;

   /* Form the thread name from process_name and name, limited to 13
    * characters. Characters 14-15 are reserved for the thread number.
    * Character 16 should be 0. Final form: "process:name12"
    *
    * If name is too long, it's truncated. If any space is left, the process
    * name fills it.
    */
   const char *process_name = util_get_process_name();
   int process_len = process_name ? strlen(process_name) : 0;
   int name_len = strlen(name);
   const int max_chars = sizeof(queue->name) - 1;

   name_len = MIN2(name_len, max_chars);

   /* See if there is any space left for the process name, reserve 1 for
    * the colon. */
   process_len = MIN2(process_len, max_chars - name_len - 1);
   process_len = MAX2(process_len, 0);

   memset(queue, 0, sizeof(*queue));

   if (process_len) {
      snprintf(queue->name, sizeof(queue->name), "%.*s:%s",
               process_len, process_name, name);
   } else {
      snprintf(queue->name, sizeof(queue->name), "%s", name);
   }

   queue->flags = flags;
   queue->max_threads = num_threads;
   queue->num_threads = num_threads;
   queue->max_jobs = max_jobs;

   queue->jobs = (struct util_queue_job*)
                 calloc(max_jobs, sizeof(struct util_queue_job));
   if (!queue->jobs)
      goto fail;

   (void) mtx_init(&queue->lock, mtx_plain);
   (void) mtx_init(&queue->finish_lock, mtx_plain);

   queue->num_queued = 0;
   cnd_init(&queue->has_queued_cond);
   cnd_init(&queue->has_space_cond);

   queue->threads = (thrd_t*) calloc(num_threads, sizeof(thrd_t));
   if (!queue->threads)
      goto fail;

   /* start threads */
   for (i = 0; i < num_threads; i++) {
      if (!util_queue_create_thread(queue, i)) {
         if (i == 0) {
            /* no threads created, fail */
            goto fail;
         } else {
            /* at least one thread created, so use it */
            queue->num_threads = i;
            break;
         }
      }
   }

   add_to_atexit_list(queue);
   return true;

fail:
   free(queue->threads);

   if (queue->jobs) {
      cnd_destroy(&queue->has_space_cond);
      cnd_destroy(&queue->has_queued_cond);
      mtx_destroy(&queue->lock);
      free(queue->jobs);
   }
   /* util_queue_is_initialized can also be used to check for success. */
   memset(queue, 0, sizeof(*queue));
   return false;
}

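/* Ask the threads with index >= keep_num_threads to exit and join them.
 * Pass finish_locked = true when the caller already holds finish_lock.
 */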
static void
util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
                        bool finish_locked)
{
   unsigned i;

   /* Signal all threads to terminate. */
   if (!finish_locked)
      mtx_lock(&queue->finish_lock);

   if (keep_num_threads >= queue->num_threads) {
      if (!finish_locked)
         mtx_unlock(&queue->finish_lock);
      return;
   }

   mtx_lock(&queue->lock);
   unsigned old_num_threads = queue->num_threads;
   /* Setting num_threads is what causes the threads to terminate.
    * Then cnd_broadcast wakes them up and they will exit their function.
    */
   queue->num_threads = keep_num_threads;
   cnd_broadcast(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);

   for (i = keep_num_threads; i < old_num_threads; i++)
      thrd_join(queue->threads[i], NULL);

   if (!finish_locked)
      mtx_unlock(&queue->finish_lock);
}

void
util_queue_destroy(struct util_queue *queue)
{
   util_queue_kill_threads(queue, 0, false);
   remove_from_atexit_list(queue);

   cnd_destroy(&queue->has_space_cond);
   cnd_destroy(&queue->has_queued_cond);
   mtx_destroy(&queue->finish_lock);
   mtx_destroy(&queue->lock);
   free(queue->jobs);
   free(queue->threads);
}

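/* Queue a job for asynchronous execution. The fence is reset here and
 * signalled by a worker thread once execute has run. If the ring buffer is
 * full, the call either grows it (UTIL_QUEUE_INIT_RESIZE_IF_FULL) or blocks
 * until a slot frees up.
 *
 * Illustrative usage (callback and variable names are examples only):
 *
 *    util_queue_add_job(&queue, state, &state->fence, compile_job, NULL);
 *    ...
 *    util_queue_fence_wait(&state->fence);
 */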
void
util_queue_add_job(struct util_queue *queue,
                   void *job,
                   struct util_queue_fence *fence,
                   util_queue_execute_func execute,
                   util_queue_execute_func cleanup)
{
   struct util_queue_job *ptr;

   mtx_lock(&queue->lock);
   if (queue->num_threads == 0) {
      mtx_unlock(&queue->lock);
      /* There is no good option here, but any leaks will be short-lived
       * as things are shutting down.
       */
      return;
   }

   util_queue_fence_reset(fence);

   assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

   if (queue->num_queued == queue->max_jobs) {
      if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL) {
         /* If the queue is full, make it larger to avoid waiting for a free
          * slot.
          */
         unsigned new_max_jobs = queue->max_jobs + 8;
         struct util_queue_job *jobs =
            (struct util_queue_job*)calloc(new_max_jobs,
                                           sizeof(struct util_queue_job));
         assert(jobs);

         /* Copy all queued jobs into the new list. */
         unsigned num_jobs = 0;
         unsigned i = queue->read_idx;

         do {
            jobs[num_jobs++] = queue->jobs[i];
            i = (i + 1) % queue->max_jobs;
         } while (i != queue->write_idx);

         assert(num_jobs == queue->num_queued);

         free(queue->jobs);
         queue->jobs = jobs;
         queue->read_idx = 0;
         queue->write_idx = num_jobs;
         queue->max_jobs = new_max_jobs;
      } else {
         /* Wait until there is a free slot. */
         while (queue->num_queued == queue->max_jobs)
            cnd_wait(&queue->has_space_cond, &queue->lock);
      }
   }

   ptr = &queue->jobs[queue->write_idx];
   assert(ptr->job == NULL);
   ptr->job = job;
   ptr->fence = fence;
   ptr->execute = execute;
   ptr->cleanup = cleanup;
   queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;

   queue->num_queued++;
   cnd_signal(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);
}

/**
 * Remove a queued job. If the job hasn't started execution, it's removed from
 * the queue. If the job has started execution, the function waits for it to
 * complete.
 *
 * In all cases, the fence is signalled when the function returns.
 *
 * The function can be used when destroying an object associated with the job
 * when you don't care about the job completion state.
 */
void
util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence)
{
   bool removed = false;

   if (util_queue_fence_is_signalled(fence))
      return;

   mtx_lock(&queue->lock);
   for (unsigned i = queue->read_idx; i != queue->write_idx;
        i = (i + 1) % queue->max_jobs) {
      if (queue->jobs[i].fence == fence) {
         if (queue->jobs[i].cleanup)
            queue->jobs[i].cleanup(queue->jobs[i].job, -1);

         /* Just clear it. The threads will treat it as a no-op job. */
         memset(&queue->jobs[i], 0, sizeof(queue->jobs[i]));
         removed = true;
         break;
      }
   }
   mtx_unlock(&queue->lock);

   if (removed)
      util_queue_fence_signal(fence);
   else
      util_queue_fence_wait(fence);
}

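/* Helper job for util_queue_finish: every worker thread runs one instance
 * and waits at the shared barrier; once all threads have arrived, every job
 * submitted before util_queue_finish has completed.
 */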
static void
util_queue_finish_execute(void *data, int num_thread)
{
   util_barrier *barrier = data;
   util_barrier_wait(barrier);
}

/**
 * Wait until all previously added jobs have completed.
 */
void
util_queue_finish(struct util_queue *queue)
{
   util_barrier barrier;
   struct util_queue_fence *fences;

   /* If 2 threads were adding jobs for 2 different barriers at the same time,
    * a deadlock would happen, because 1 barrier requires that all threads
    * wait for it exclusively.
    */
   mtx_lock(&queue->finish_lock);
   fences = malloc(queue->num_threads * sizeof(*fences));
   util_barrier_init(&barrier, queue->num_threads);

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_init(&fences[i]);
      util_queue_add_job(queue, &barrier, &fences[i], util_queue_finish_execute, NULL);
   }

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_wait(&fences[i]);
      util_queue_fence_destroy(&fences[i]);
   }
   mtx_unlock(&queue->finish_lock);

   util_barrier_destroy(&barrier);

   free(fences);
}

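/* Return the CPU time consumed so far by the given worker thread;
 * out-of-range indices return 0 instead of failing.
 */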
int64_t
util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
{
   /* Allow some flexibility by not raising an error. */
   if (thread_index >= queue->num_threads)
      return 0;

   return u_thread_get_time_nano(queue->threads[thread_index]);
}