/*
 * Copyright © 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include "u_queue.h"

#include "c11/threads.h"

#include "util/os_time.h"
#include "util/u_string.h"
#include "util/u_thread.h"
#include "u_process.h"
#if defined(__linux__)
#include <sys/resource.h>
#include <sys/syscall.h>
#endif
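/* Cap on the total size of queued job data; util_queue_add_job stops
 * growing the ring buffer once this much job data is pending. */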
#define S_256MB (256 * 1024 * 1024)
static void
util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
                        bool finish_locked);
/****************************************************************************
 * Wait for all queues to assert idle when exit() is called.
 *
 * Otherwise, C++ static variable destructors can be called while threads
 * are using the static variables.
 */
static once_flag atexit_once_flag = ONCE_FLAG_INIT;
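/* List of all live queues, walked by the atexit handler; guarded by
 * exit_mutex. */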
static struct list_head queue_list;
static mtx_t exit_mutex = _MTX_INITIALIZER_NP;
static void
atexit_handler(void)
{
   struct util_queue *iter;

   mtx_lock(&exit_mutex);
   /* Wait for all queues to assert idle. */
   LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
      util_queue_kill_threads(iter, 0, false);
   }
   mtx_unlock(&exit_mutex);
}
static void
global_init(void)
{
   list_inithead(&queue_list);
   atexit(atexit_handler);
}
static void
add_to_atexit_list(struct util_queue *queue)
{
   call_once(&atexit_once_flag, global_init);

   mtx_lock(&exit_mutex);
   list_add(&queue->head, &queue_list);
   mtx_unlock(&exit_mutex);
}
static void
remove_from_atexit_list(struct util_queue *queue)
{
   struct util_queue *iter, *tmp;

   mtx_lock(&exit_mutex);
   LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
      if (iter == queue) {
         list_del(&iter->head);
         break;
      }
   }
   mtx_unlock(&exit_mutex);
}
/****************************************************************************
 * util_queue_fence
 */
#ifdef UTIL_QUEUE_FENCE_FUTEX
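/* The futex implementation keeps the whole fence state in fence->val
 * (declared in u_queue.h), as the atomics below assume: 0 means signalled,
 * 1 means unsignalled, and 2 means unsignalled with waiters possibly
 * present. */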
static bool
do_futex_fence_wait(struct util_queue_fence *fence,
                    bool timeout, int64_t abs_timeout)
{
   uint32_t v = fence->val;
   struct timespec ts;
   ts.tv_sec = abs_timeout / (1000*1000*1000);
   ts.tv_nsec = abs_timeout % (1000*1000*1000);

   while (v != 0) {
      if (v != 2) {
         v = p_atomic_cmpxchg(&fence->val, 1, 2);
         if (v == 0)
            return true;
      }

      int r = futex_wait(&fence->val, 2, timeout ? &ts : NULL);
      if (timeout && r < 0) {
         if (errno == ETIMEDOUT)
            return false;
      }

      v = fence->val;
   }

   return true;
}
void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   do_futex_fence_wait(fence, false, 0);
}
bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   return do_futex_fence_wait(fence, true, abs_timeout);
}
#endif
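/* Fallback for platforms without futexes: the same fence API is implemented
 * below with a plain mutex and condition variable. */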
#ifdef UTIL_QUEUE_FENCE_STANDARD
void
util_queue_fence_signal(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   fence->signalled = true;
   cnd_broadcast(&fence->cond);
   mtx_unlock(&fence->mutex);
}
void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   while (!fence->signalled)
      cnd_wait(&fence->cond, &fence->mutex);
   mtx_unlock(&fence->mutex);
}
bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   /* This terrible hack is made necessary by the fact that we really want an
    * internal interface consistent with os_time_*, but cnd_timedwait is spec'd
    * to be relative to the TIME_UTC clock.
    */
   int64_t rel = abs_timeout - os_time_get_nano();

   if (rel > 0) {
      struct timespec ts;

      timespec_get(&ts, TIME_UTC);

      ts.tv_sec += rel / (1000*1000*1000);
      ts.tv_nsec += rel % (1000*1000*1000);
      if (ts.tv_nsec >= (1000*1000*1000)) {
         ts.tv_sec++;
         ts.tv_nsec -= (1000*1000*1000);
      }

      mtx_lock(&fence->mutex);
      while (!fence->signalled) {
         if (cnd_timedwait(&fence->cond, &fence->mutex, &ts) != thrd_success)
            break;
      }
      mtx_unlock(&fence->mutex);
   }

   return fence->signalled;
}
void
util_queue_fence_init(struct util_queue_fence *fence)
{
   memset(fence, 0, sizeof(*fence));
   (void) mtx_init(&fence->mutex, mtx_plain);
   cnd_init(&fence->cond);
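   /* Fences start out signalled; util_queue_add_job resets them before the
    * job is queued. */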
   fence->signalled = true;
}
void
util_queue_fence_destroy(struct util_queue_fence *fence)
{
   assert(fence->signalled);

   /* Ensure that another thread is not in the middle of
    * util_queue_fence_signal (having set the fence to signalled but still
    * holding the fence mutex).
    *
    * A common contract between threads is that as soon as a fence is signalled
    * by thread A, thread B is allowed to destroy it. Since
    * util_queue_fence_is_signalled does not lock the fence mutex (for
    * performance reasons), we must do so here.
    */
   mtx_lock(&fence->mutex);
   mtx_unlock(&fence->mutex);

   cnd_destroy(&fence->cond);
   mtx_destroy(&fence->mutex);
}
#endif
/****************************************************************************
 * util_queue implementation
 */
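/* Per-thread startup arguments: heap-allocated in util_queue_create_thread
 * and freed by the worker thread itself once it has copied the fields out. */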
struct thread_input {
   struct util_queue *queue;
   int thread_index;
};
static int
util_queue_thread_func(void *input)
{
   struct util_queue *queue = ((struct thread_input*)input)->queue;
   int thread_index = ((struct thread_input*)input)->thread_index;

   free(input);
#ifdef HAVE_PTHREAD_SETAFFINITY
   if (queue->flags & UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY) {
      /* Don't inherit the thread affinity from the parent thread.
       * Set the full mask.
       */
      cpu_set_t cpuset;
      CPU_ZERO(&cpuset);
      for (unsigned i = 0; i < CPU_SETSIZE; i++)
         CPU_SET(i, &cpuset);

      pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
   }
#endif
#if defined(__linux__)
   if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
      /* The nice() function can only set a maximum of 19. */
      setpriority(PRIO_PROCESS, syscall(SYS_gettid), 19);
   }
#endif
   if (strlen(queue->name) > 0) {
      char name[16];
      snprintf(name, sizeof(name), "%s%i", queue->name, thread_index);
      u_thread_setname(name);
   }
   while (1) {
      struct util_queue_job job;

      mtx_lock(&queue->lock);
      assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

      /* wait if the queue is empty */
      while (thread_index < queue->num_threads && queue->num_queued == 0)
         cnd_wait(&queue->has_queued_cond, &queue->lock);
      /* only kill threads that are above "num_threads" */
      if (thread_index >= queue->num_threads) {
         mtx_unlock(&queue->lock);
         break;
      }
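      /* Pop the job at read_idx; util_queue_kill_threads retires workers by
       * lowering num_threads, so reaching this point means this thread is
       * still live. */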
      job = queue->jobs[queue->read_idx];
      memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;

      queue->num_queued--;
      cnd_signal(&queue->has_space_cond);
      if (job.job)
         queue->total_jobs_size -= job.job_size;
      mtx_unlock(&queue->lock);
      if (job.job) {
         job.execute(job.job, thread_index);
         util_queue_fence_signal(job.fence);
         if (job.cleanup)
            job.cleanup(job.job, thread_index);
      }
   }
   /* signal remaining jobs if all threads are being terminated */
   mtx_lock(&queue->lock);
   if (queue->num_threads == 0) {
      for (unsigned i = queue->read_idx; i != queue->write_idx;
           i = (i + 1) % queue->max_jobs) {
         if (queue->jobs[i].job) {
            util_queue_fence_signal(queue->jobs[i].fence);
            queue->jobs[i].job = NULL;
         }
      }
      queue->read_idx = queue->write_idx;
      queue->num_queued = 0;
   }
   mtx_unlock(&queue->lock);
   return 0;
}
static bool
util_queue_create_thread(struct util_queue *queue, unsigned index)
{
   struct thread_input *input =
      (struct thread_input *) malloc(sizeof(struct thread_input));
   input->queue = queue;
   input->thread_index = index;

   queue->threads[index] = u_thread_create(util_queue_thread_func, input);

   if (!queue->threads[index]) {
      free(input);
      return false;
   }
   if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
#if defined(__linux__) && defined(SCHED_BATCH)
      struct sched_param sched_param = {0};

      /* The nice() function can only set a maximum of 19.
       * SCHED_BATCH gives the scheduler a hint that this is a latency
       * insensitive thread.
       *
       * Note that Linux only allows decreasing the priority. The original
       * priority can't be restored.
       */
      pthread_setschedparam(queue->threads[index], SCHED_BATCH, &sched_param);
#endif
   }
   return true;
}
void
util_queue_adjust_num_threads(struct util_queue *queue, unsigned num_threads)
{
   num_threads = MIN2(num_threads, queue->max_threads);
   num_threads = MAX2(num_threads, 1);

   mtx_lock(&queue->finish_lock);
   unsigned old_num_threads = queue->num_threads;

   if (num_threads == old_num_threads) {
      mtx_unlock(&queue->finish_lock);
      return;
   }

   if (num_threads < old_num_threads) {
      util_queue_kill_threads(queue, num_threads, true);
      mtx_unlock(&queue->finish_lock);
      return;
   }

   /* Create threads.
    *
    * We need to update num_threads first, because threads terminate
    * when thread_index >= num_threads.
    */
   queue->num_threads = num_threads;
   for (unsigned i = old_num_threads; i < num_threads; i++) {
      if (!util_queue_create_thread(queue, i))
         break;
   }
   mtx_unlock(&queue->finish_lock);
}
bool
util_queue_init(struct util_queue *queue,
                const char *name,
                unsigned max_jobs,
                unsigned num_threads,
                unsigned flags)
{
   unsigned i;
   /* Form the thread name from process_name and name, limited to 13
    * characters. Characters 14-15 are reserved for the thread number.
    * Character 16 should be 0. Final form: "process:name12"
    *
    * If name is too long, it's truncated. If any space is left, the process
    * name fills it.
    */
   const char *process_name = util_get_process_name();
   int process_len = process_name ? strlen(process_name) : 0;
   int name_len = strlen(name);
   const int max_chars = sizeof(queue->name) - 1;

   name_len = MIN2(name_len, max_chars);
   /* See if there is any space left for the process name, reserve 1 for
    * the colon. */
   process_len = MIN2(process_len, max_chars - name_len - 1);
   process_len = MAX2(process_len, 0);
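   /* Worked example (names illustrative): with max_chars = 13,
    * name = "shader" and process_name = "glretrace" give name_len = 6 and
    * process_len = MIN2(9, 13 - 6 - 1) = 6, so the queue is named
    * "glretr:shader" and its first thread "glretr:shader0". */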
   memset(queue, 0, sizeof(*queue));

   if (process_len) {
      snprintf(queue->name, sizeof(queue->name), "%.*s:%s",
               process_len, process_name, name);
   } else {
      snprintf(queue->name, sizeof(queue->name), "%s", name);
   }
   queue->flags = flags;
   queue->max_threads = num_threads;
   queue->num_threads = num_threads;
   queue->max_jobs = max_jobs;

   queue->jobs = (struct util_queue_job*)
                 calloc(max_jobs, sizeof(struct util_queue_job));
   if (!queue->jobs)
      goto fail;

   (void) mtx_init(&queue->lock, mtx_plain);
   (void) mtx_init(&queue->finish_lock, mtx_plain);

   queue->num_queued = 0;
   cnd_init(&queue->has_queued_cond);
   cnd_init(&queue->has_space_cond);

   queue->threads = (thrd_t*) calloc(num_threads, sizeof(thrd_t));
   if (!queue->threads)
      goto fail;
   /* start threads */
   for (i = 0; i < num_threads; i++) {
      if (!util_queue_create_thread(queue, i)) {
         if (i == 0) {
            /* no threads created, fail */
            goto fail;
         } else {
            /* at least one thread created, so use it */
            queue->num_threads = i;
            break;
         }
      }
   }

   add_to_atexit_list(queue);
   return true;
fail:
   free(queue->threads);

   if (queue->jobs) {
      cnd_destroy(&queue->has_space_cond);
      cnd_destroy(&queue->has_queued_cond);
      mtx_destroy(&queue->lock);
      free(queue->jobs);
   }
   /* also util_queue_is_initialized can be used to check for success */
   memset(queue, 0, sizeof(*queue));
   return false;
}
static void
util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
                        bool finish_locked)
{
   unsigned i;

   /* Signal all threads to terminate. */
   if (!finish_locked)
      mtx_lock(&queue->finish_lock);

   if (keep_num_threads >= queue->num_threads) {
      if (!finish_locked)
         mtx_unlock(&queue->finish_lock);
      return;
   }

   mtx_lock(&queue->lock);
   unsigned old_num_threads = queue->num_threads;
   /* Setting num_threads is what causes the threads to terminate.
    * Then cnd_broadcast wakes them up and they will exit their function.
    */
   queue->num_threads = keep_num_threads;
   cnd_broadcast(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);

   for (i = keep_num_threads; i < old_num_threads; i++)
      thrd_join(queue->threads[i], NULL);

   if (!finish_locked)
      mtx_unlock(&queue->finish_lock);
}
void
util_queue_destroy(struct util_queue *queue)
{
   util_queue_kill_threads(queue, 0, false);
   remove_from_atexit_list(queue);

   cnd_destroy(&queue->has_space_cond);
   cnd_destroy(&queue->has_queued_cond);
   mtx_destroy(&queue->finish_lock);
   mtx_destroy(&queue->lock);
   free(queue->jobs);
   free(queue->threads);
}
void
util_queue_add_job(struct util_queue *queue,
                   void *job,
                   struct util_queue_fence *fence,
                   util_queue_execute_func execute,
                   util_queue_execute_func cleanup,
                   const size_t job_size)
{
   struct util_queue_job *ptr;

   mtx_lock(&queue->lock);
   if (queue->num_threads == 0) {
      mtx_unlock(&queue->lock);
      /* well, no good option here, but any leaks will be
       * short-lived as things are shutting down.
       */
      return;
   }

   util_queue_fence_reset(fence);
   assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
   if (queue->num_queued == queue->max_jobs) {
      if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL &&
          queue->total_jobs_size + job_size < S_256MB) {
         /* If the queue is full, make it larger to avoid waiting for a free
          * slot.
          */
         unsigned new_max_jobs = queue->max_jobs + 8;
         struct util_queue_job *jobs =
            (struct util_queue_job*)calloc(new_max_jobs,
                                           sizeof(struct util_queue_job));
         assert(jobs);

         /* Copy all queued jobs into the new list. */
         unsigned num_jobs = 0;
         unsigned i = queue->read_idx;

         do {
            jobs[num_jobs++] = queue->jobs[i];
            i = (i + 1) % queue->max_jobs;
         } while (i != queue->write_idx);

         assert(num_jobs == queue->num_queued);

         free(queue->jobs);
         queue->jobs = jobs;
         queue->read_idx = 0;
         queue->write_idx = num_jobs;
         queue->max_jobs = new_max_jobs;
      } else {
         /* Wait until there is a free slot. */
         while (queue->num_queued == queue->max_jobs)
            cnd_wait(&queue->has_space_cond, &queue->lock);
      }
   }
   ptr = &queue->jobs[queue->write_idx];
   assert(ptr->job == NULL);
   ptr->job = job;
   ptr->fence = fence;
   ptr->execute = execute;
   ptr->cleanup = cleanup;
   ptr->job_size = job_size;

   queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
   queue->total_jobs_size += ptr->job_size;

   queue->num_queued++;
   cnd_signal(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);
}
/**
 * Remove a queued job. If the job hasn't started execution, it's removed from
 * the queue. If the job has started execution, the function waits for it to
 * complete.
 *
 * In all cases, the fence is signalled when the function returns.
 *
 * The function can be used when destroying an object associated with the job
 * when you don't care about the job completion state.
 */
void
util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence)
{
   bool removed = false;

   if (util_queue_fence_is_signalled(fence))
      return;

   mtx_lock(&queue->lock);
   for (unsigned i = queue->read_idx; i != queue->write_idx;
        i = (i + 1) % queue->max_jobs) {
      if (queue->jobs[i].fence == fence) {
         if (queue->jobs[i].cleanup)
            queue->jobs[i].cleanup(queue->jobs[i].job, -1);

         /* Just clear it. The threads will treat it as a no-op job. */
         memset(&queue->jobs[i], 0, sizeof(queue->jobs[i]));
         removed = true;
         break;
      }
   }
   mtx_unlock(&queue->lock);

   if (removed)
      util_queue_fence_signal(fence);
   else
      util_queue_fence_wait(fence);
}
static void
util_queue_finish_execute(void *data, int num_thread)
{
   util_barrier *barrier = data;
   util_barrier_wait(barrier);
}
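/* util_queue_finish works by queuing one barrier job per worker thread.
 * A worker can only reach its barrier job after it has drained all earlier
 * jobs in the ring, and util_barrier_wait releases nobody until every worker
 * has arrived, so returning from the fence waits below implies that all
 * previously added jobs have completed. */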
/**
 * Wait until all previously added jobs have completed.
 */
void
util_queue_finish(struct util_queue *queue)
{
   util_barrier barrier;
   struct util_queue_fence *fences;
   /* If 2 threads were adding jobs for 2 different barriers at the same time,
    * a deadlock would happen, because 1 barrier requires that all threads
    * wait for it exclusively.
    */
   mtx_lock(&queue->finish_lock);

   /* The number of threads can be changed to 0, e.g. by the atexit handler. */
   if (!queue->num_threads) {
      mtx_unlock(&queue->finish_lock);
      return;
   }

   fences = malloc(queue->num_threads * sizeof(*fences));
   util_barrier_init(&barrier, queue->num_threads);

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_init(&fences[i]);
      util_queue_add_job(queue, &barrier, &fences[i],
                         util_queue_finish_execute, NULL, 0);
   }

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_wait(&fences[i]);
      util_queue_fence_destroy(&fences[i]);
   }
   mtx_unlock(&queue->finish_lock);

   util_barrier_destroy(&barrier);

   free(fences);
}
int64_t
util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
{
   /* Allow some flexibility by not raising an error. */
   if (thread_index >= queue->num_threads)
      return 0;

   return u_thread_get_time_nano(queue->threads[thread_index]);
}