/*
 * Copyright © 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "u_queue.h"

#include <time.h>

#include "util/os_time.h"
#include "util/u_string.h"
#include "util/u_thread.h"

static void util_queue_killall_and_wait(struct util_queue *queue);

/****************************************************************************
 * Wait for all queues to assert idle when exit() is called.
 *
 * Otherwise, C++ static variable destructors can be called while threads
 * are using the static variables.
 */

static once_flag atexit_once_flag = ONCE_FLAG_INIT;
static struct list_head queue_list;
static mtx_t exit_mutex = _MTX_INITIALIZER_NP;
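
/* atexit() callback: drain every queue that is still registered so that no
 * worker thread is left running when static destructors start executing.
 */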
static void
atexit_handler(void)
{
   struct util_queue *iter;

   mtx_lock(&exit_mutex);
   /* Wait for all queues to assert idle. */
   LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
      util_queue_killall_and_wait(iter);
   }
   mtx_unlock(&exit_mutex);
}
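
/* One-time setup, run via call_once: initialize the global queue list and
 * register the exit handler.
 */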
static void
global_init(void)
{
   LIST_INITHEAD(&queue_list);
   atexit(atexit_handler);
}
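
/* Register a queue so that atexit_handler can wait for it on process exit. */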
static void
add_to_atexit_list(struct util_queue *queue)
{
   call_once(&atexit_once_flag, global_init);

   mtx_lock(&exit_mutex);
   LIST_ADD(&queue->head, &queue_list);
   mtx_unlock(&exit_mutex);
}
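
/* Unregister a queue; called from util_queue_destroy once its threads have
 * been joined.
 */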
static void
remove_from_atexit_list(struct util_queue *queue)
{
   struct util_queue *iter, *tmp;

   mtx_lock(&exit_mutex);
   LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
      if (iter == queue) {
         LIST_DEL(&iter->head);
         break;
      }
   }
   mtx_unlock(&exit_mutex);
}

/****************************************************************************
 * util_queue_fence
 */
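
/* The futex implementation packs the whole fence state into one 32-bit
 * value: 0 = signalled, 1 = unsignalled, 2 = unsignalled with possible
 * waiters. The wait path below promotes the fence from 1 to 2 before
 * sleeping, so the signalling side knows whether a futex wake is required.
 * (The encoding is inferred from the wait loop; the signal path is not part
 * of this file.)
 */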
#ifdef UTIL_QUEUE_FENCE_FUTEX
static bool
do_futex_fence_wait(struct util_queue_fence *fence,
                    bool timeout, int64_t abs_timeout)
{
   uint32_t v = fence->val;
   struct timespec ts;
   ts.tv_sec = abs_timeout / (1000*1000*1000);
   ts.tv_nsec = abs_timeout % (1000*1000*1000);

   while (v != 0) {
      if (v != 2) {
         v = p_atomic_cmpxchg(&fence->val, 1, 2);
         if (v == 0)
            return true;
      }

      int r = futex_wait(&fence->val, 2, timeout ? &ts : NULL);
      if (timeout && r < 0) {
         if (errno == ETIMEDOUT)
            return false;
      }

      v = fence->val;
   }

   return true;
}

void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   do_futex_fence_wait(fence, false, 0);
}

bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   return do_futex_fence_wait(fence, true, abs_timeout);
}
#endif

#ifdef UTIL_QUEUE_FENCE_STANDARD
void
util_queue_fence_signal(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   fence->signalled = true;
   cnd_broadcast(&fence->cond);
   mtx_unlock(&fence->mutex);
}

void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   while (!fence->signalled)
      cnd_wait(&fence->cond, &fence->mutex);
   mtx_unlock(&fence->mutex);
}

bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   /* This terrible hack is made necessary by the fact that we really want an
    * internal interface consistent with os_time_*, but cnd_timedwait is spec'd
    * to be relative to the TIME_UTC clock.
    */
   int64_t rel = abs_timeout - os_time_get_nano();

   if (rel > 0) {
      struct timespec ts;

      timespec_get(&ts, TIME_UTC);

      ts.tv_sec += rel / (1000*1000*1000);
      ts.tv_nsec += rel % (1000*1000*1000);
      if (ts.tv_nsec >= (1000*1000*1000)) {
         ts.tv_sec++;
         ts.tv_nsec -= (1000*1000*1000);
      }

      mtx_lock(&fence->mutex);
      while (!fence->signalled) {
         if (cnd_timedwait(&fence->cond, &fence->mutex, &ts) != thrd_success)
            break;
      }
      mtx_unlock(&fence->mutex);
   }

   return fence->signalled;
}
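
/* Fences are created in the signalled state, so a freshly initialized fence
 * can be waited on or destroyed immediately; util_queue_add_job resets it to
 * unsignalled when a job is attached.
 */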
void
util_queue_fence_init(struct util_queue_fence *fence)
{
   memset(fence, 0, sizeof(*fence));
   (void) mtx_init(&fence->mutex, mtx_plain);
   cnd_init(&fence->cond);
   fence->signalled = true;
}

void
util_queue_fence_destroy(struct util_queue_fence *fence)
{
   assert(fence->signalled);

   /* Ensure that another thread is not in the middle of
    * util_queue_fence_signal (having set the fence to signalled but still
    * holding the fence mutex).
    *
    * A common contract between threads is that as soon as a fence is signalled
    * by thread A, thread B is allowed to destroy it. Since
    * util_queue_fence_is_signalled does not lock the fence mutex (for
    * performance reasons), we must do so here.
    */
   mtx_lock(&fence->mutex);
   mtx_unlock(&fence->mutex);

   cnd_destroy(&fence->cond);
   mtx_destroy(&fence->mutex);
}
#endif

/****************************************************************************
 * util_queue implementation
 */
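
/* Heap-allocated bootstrap data passed to each worker thread; the worker
 * frees it after copying out the fields.
 */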
struct thread_input {
   struct util_queue *queue;
   int thread_index;
};
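
/* Worker thread entry point: name the thread, then pop and execute jobs in
 * FIFO order until kill_threads is set, signalling each job's fence as it
 * completes.
 */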
static int
util_queue_thread_func(void *input)
{
   struct util_queue *queue = ((struct thread_input*)input)->queue;
   int thread_index = ((struct thread_input*)input)->thread_index;

   free(input);

   if (queue->name) {
      char name[16];
      util_snprintf(name, sizeof(name), "%s:%i", queue->name, thread_index);
      u_thread_setname(name);
   }

   while (1) {
      struct util_queue_job job;

      mtx_lock(&queue->lock);
      assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

      /* wait if the queue is empty */
      while (!queue->kill_threads && queue->num_queued == 0)
         cnd_wait(&queue->has_queued_cond, &queue->lock);

      if (queue->kill_threads) {
         mtx_unlock(&queue->lock);
         break;
      }

      job = queue->jobs[queue->read_idx];
      memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;

      queue->num_queued--;
      cnd_signal(&queue->has_space_cond);
      mtx_unlock(&queue->lock);

      if (job.job) {
         job.execute(job.job, thread_index);
         util_queue_fence_signal(job.fence);
         if (job.cleanup)
            job.cleanup(job.job, thread_index);
      }
   }

   /* signal remaining jobs before terminating */
   mtx_lock(&queue->lock);
   for (unsigned i = queue->read_idx; i != queue->write_idx;
        i = (i + 1) % queue->max_jobs) {
      if (queue->jobs[i].job) {
         util_queue_fence_signal(queue->jobs[i].fence);
         queue->jobs[i].job = NULL;
      }
   }
   queue->read_idx = queue->write_idx;
   queue->num_queued = 0;
   mtx_unlock(&queue->lock);
   return 0;
}
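
/* Create the job ring buffer, spawn num_threads worker threads, and register
 * the queue on the global atexit list. Returns false (and leaves the queue
 * zeroed) if nothing could be allocated or no thread could be created.
 */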
bool
util_queue_init(struct util_queue *queue,
                const char *name,
                unsigned max_jobs,
                unsigned num_threads,
                unsigned flags)
{
   unsigned i;

   memset(queue, 0, sizeof(*queue));
   queue->name = name;
   queue->flags = flags;
   queue->num_threads = num_threads;
   queue->max_jobs = max_jobs;

   queue->jobs = (struct util_queue_job*)
                 calloc(max_jobs, sizeof(struct util_queue_job));
   if (!queue->jobs)
      goto fail;

   (void) mtx_init(&queue->lock, mtx_plain);

   queue->num_queued = 0;
   cnd_init(&queue->has_queued_cond);
   cnd_init(&queue->has_space_cond);

   queue->threads = (thrd_t*) calloc(num_threads, sizeof(thrd_t));
   if (!queue->threads)
      goto fail;

   /* start threads */
   for (i = 0; i < num_threads; i++) {
      struct thread_input *input =
         (struct thread_input *) malloc(sizeof(struct thread_input));
      input->queue = queue;
      input->thread_index = i;

      queue->threads[i] = u_thread_create(util_queue_thread_func, input);

      if (!queue->threads[i]) {
         free(input);

         if (i == 0) {
            /* no threads created, fail */
            goto fail;
         } else {
            /* at least one thread created, so use it */
            queue->num_threads = i;
            break;
         }
      }

      if (flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
#if defined(__linux__) && defined(SCHED_IDLE)
         struct sched_param sched_param = {0};

         /* The nice() function can only set a maximum of 19.
          * SCHED_IDLE is the same as nice = 20.
          *
          * Note that Linux only allows decreasing the priority. The original
          * priority can't be restored.
          */
         pthread_setschedparam(queue->threads[i], SCHED_IDLE, &sched_param);
#endif
      }
   }

   add_to_atexit_list(queue);
   return true;

fail:
   free(queue->threads);

   if (queue->jobs) {
      cnd_destroy(&queue->has_space_cond);
      cnd_destroy(&queue->has_queued_cond);
      mtx_destroy(&queue->lock);
      free(queue->jobs);
   }
   /* also util_queue_is_initialized can be used to check for success */
   memset(queue, 0, sizeof(*queue));
   return false;
}
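
/* Ask all workers to exit and join them. Jobs still sitting in the ring are
 * signalled (but not executed) by the terminating workers.
 */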
static void
util_queue_killall_and_wait(struct util_queue *queue)
{
   unsigned i;

   /* Signal all threads to terminate. */
   mtx_lock(&queue->lock);
   queue->kill_threads = 1;
   cnd_broadcast(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);

   for (i = 0; i < queue->num_threads; i++)
      thrd_join(queue->threads[i], NULL);
   queue->num_threads = 0;
}
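
/* Join all worker threads and release every resource owned by the queue. */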
void
util_queue_destroy(struct util_queue *queue)
{
   util_queue_killall_and_wait(queue);
   remove_from_atexit_list(queue);

   cnd_destroy(&queue->has_space_cond);
   cnd_destroy(&queue->has_queued_cond);
   mtx_destroy(&queue->lock);
   free(queue->jobs);
   free(queue->threads);
}
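
/* Queue a job. If the ring is full, this either grows it
 * (UTIL_QUEUE_INIT_RESIZE_IF_FULL) or blocks until a worker frees a slot.
 * The fence is reset here and signalled by the worker when the job is done.
 */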
void
util_queue_add_job(struct util_queue *queue,
                   void *job,
                   struct util_queue_fence *fence,
                   util_queue_execute_func execute,
                   util_queue_execute_func cleanup)
{
   struct util_queue_job *ptr;

   mtx_lock(&queue->lock);
   if (queue->kill_threads) {
      mtx_unlock(&queue->lock);
      /* well no good option here, but any leaks will be
       * short-lived as things are shutting down..
       */
      return;
   }

   util_queue_fence_reset(fence);

   assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

   if (queue->num_queued == queue->max_jobs) {
      if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL) {
         /* If the queue is full, make it larger to avoid waiting for a free
          * slot.
          */
         unsigned new_max_jobs = queue->max_jobs + 8;
         struct util_queue_job *jobs =
            (struct util_queue_job*)calloc(new_max_jobs,
                                           sizeof(struct util_queue_job));
         assert(jobs);

         /* Copy all queued jobs into the new list. */
         unsigned num_jobs = 0;
         unsigned i = queue->read_idx;

         do {
            jobs[num_jobs++] = queue->jobs[i];
            i = (i + 1) % queue->max_jobs;
         } while (i != queue->write_idx);

         assert(num_jobs == queue->num_queued);

         free(queue->jobs);
         queue->jobs = jobs;
         queue->read_idx = 0;
         queue->write_idx = num_jobs;
         queue->max_jobs = new_max_jobs;
      } else {
         /* Wait until there is a free slot. */
         while (queue->num_queued == queue->max_jobs)
            cnd_wait(&queue->has_space_cond, &queue->lock);
      }
   }

   ptr = &queue->jobs[queue->write_idx];
   assert(ptr->job == NULL);
   ptr->job = job;
   ptr->fence = fence;
   ptr->execute = execute;
   ptr->cleanup = cleanup;
   queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;

   queue->num_queued++;
   cnd_signal(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);
}

/**
 * Remove a queued job. If the job hasn't started execution, it's removed from
 * the queue. If the job has started execution, the function waits for it to
 * complete.
 *
 * In all cases, the fence is signalled when the function returns.
 *
 * The function can be used when destroying an object associated with the job
 * when you don't care about the job completion state.
 */
void
util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence)
{
   bool removed = false;

   if (util_queue_fence_is_signalled(fence))
      return;

   mtx_lock(&queue->lock);
   for (unsigned i = queue->read_idx; i != queue->write_idx;
        i = (i + 1) % queue->max_jobs) {
      if (queue->jobs[i].fence == fence) {
         if (queue->jobs[i].cleanup)
            queue->jobs[i].cleanup(queue->jobs[i].job, -1);

         /* Just clear it. The threads will treat it as a no-op job. */
         memset(&queue->jobs[i], 0, sizeof(queue->jobs[i]));
         removed = true;
         break;
      }
   }
   mtx_unlock(&queue->lock);

   if (removed)
      util_queue_fence_signal(fence);
   else
      util_queue_fence_wait(fence);
}
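
/* A sketch of typical caller usage; the screen/shader names below are
 * hypothetical, not part of this API:
 *
 *    // The shader is being destroyed; we no longer care whether its async
 *    // compile job ever ran, only that it is not pending anymore.
 *    util_queue_drop_job(&screen->compile_queue, &shader->ready_fence);
 *    util_queue_fence_destroy(&shader->ready_fence); // signalled either way
 */

/* Internal job used by util_queue_finish: each worker parks in the shared
 * barrier, which only opens once every worker has arrived.
 */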
static void
util_queue_finish_execute(void *data, int num_thread)
{
   util_barrier *barrier = data;
   util_barrier_wait(barrier);
}
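
/* util_queue_finish works by submitting one barrier job per worker thread:
 * since no worker can pass util_barrier_wait until all of them have reached
 * it, every job queued before the barrier must have been executed by the
 * time the fences below are signalled.
 */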
/**
 * Wait until all previously added jobs have completed.
 */
void
util_queue_finish(struct util_queue *queue)
{
   util_barrier barrier;
   struct util_queue_fence *fences = malloc(queue->num_threads * sizeof(*fences));

   util_barrier_init(&barrier, queue->num_threads);

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_init(&fences[i]);
      util_queue_add_job(queue, &barrier, &fences[i], util_queue_finish_execute, NULL);
   }

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_wait(&fences[i]);
      util_queue_fence_destroy(&fences[i]);
   }

   util_barrier_destroy(&barrier);

   free(fences);
}
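
/* Return the accumulated OS-reported running time of one worker thread, in
 * nanoseconds; out-of-range indices simply report 0.
 */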
int64_t
util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
{
   /* Allow some flexibility by not raising an error. */
   if (thread_index >= queue->num_threads)
      return 0;

   return u_thread_get_time_nano(queue->threads[thread_index]);
}