util/queue: move thread creation into a separate function
[mesa.git] src/util/u_queue.c
/*
 * Copyright © 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "u_queue.h"

#include <time.h>

#include "util/os_time.h"
#include "util/u_string.h"
#include "util/u_thread.h"
#include "u_process.h"

static void util_queue_killall_and_wait(struct util_queue *queue);

/****************************************************************************
 * Wait for all queues to assert idle when exit() is called.
 *
 * Otherwise, C++ static variable destructors can be called while threads
 * are using the static variables.
 */

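/* Registry of all live queues, protected by exit_mutex and drained by
 * atexit_handler() before process exit.
 */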
static once_flag atexit_once_flag = ONCE_FLAG_INIT;
static struct list_head queue_list;
static mtx_t exit_mutex = _MTX_INITIALIZER_NP;

static void
atexit_handler(void)
{
   struct util_queue *iter;

   mtx_lock(&exit_mutex);
   /* Wait for all queues to assert idle. */
   LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
      util_queue_killall_and_wait(iter);
   }
   mtx_unlock(&exit_mutex);
}

static void
global_init(void)
{
   LIST_INITHEAD(&queue_list);
   atexit(atexit_handler);
}

static void
add_to_atexit_list(struct util_queue *queue)
{
   call_once(&atexit_once_flag, global_init);

   mtx_lock(&exit_mutex);
   LIST_ADD(&queue->head, &queue_list);
   mtx_unlock(&exit_mutex);
}

static void
remove_from_atexit_list(struct util_queue *queue)
{
   struct util_queue *iter, *tmp;

   mtx_lock(&exit_mutex);
   LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
      if (iter == queue) {
         LIST_DEL(&iter->head);
         break;
      }
   }
   mtx_unlock(&exit_mutex);
}

/****************************************************************************
 * util_queue_fence
 */

#ifdef UTIL_QUEUE_FENCE_FUTEX
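/* Fence values used by the futex implementation: 0 = signalled,
 * 1 = unsignalled with no waiters, 2 = unsignalled with waiters sleeping
 * on the futex. Waiters upgrade 1 -> 2 before sleeping so the signalling
 * side knows whether a futex wake is needed.
 */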
static bool
do_futex_fence_wait(struct util_queue_fence *fence,
                    bool timeout, int64_t abs_timeout)
{
   uint32_t v = fence->val;
   struct timespec ts;
   ts.tv_sec = abs_timeout / (1000*1000*1000);
   ts.tv_nsec = abs_timeout % (1000*1000*1000);

   while (v != 0) {
      if (v != 2) {
         v = p_atomic_cmpxchg(&fence->val, 1, 2);
         if (v == 0)
            return true;
      }

      int r = futex_wait(&fence->val, 2, timeout ? &ts : NULL);
      if (timeout && r < 0) {
         if (errno == ETIMEDOUT)
            return false;
      }

      v = fence->val;
   }

   return true;
}

void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   do_futex_fence_wait(fence, false, 0);
}

bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   return do_futex_fence_wait(fence, true, abs_timeout);
}

#endif

#ifdef UTIL_QUEUE_FENCE_STANDARD
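/* Fallback fence implementation based on a mutex and a condition variable,
 * used on platforms without futex support.
 */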
void
util_queue_fence_signal(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   fence->signalled = true;
   cnd_broadcast(&fence->cond);
   mtx_unlock(&fence->mutex);
}

void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   while (!fence->signalled)
      cnd_wait(&fence->cond, &fence->mutex);
   mtx_unlock(&fence->mutex);
}

bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   /* This terrible hack is made necessary by the fact that we really want an
    * internal interface consistent with os_time_*, but cnd_timedwait is spec'd
    * to be relative to the TIME_UTC clock.
    */
   int64_t rel = abs_timeout - os_time_get_nano();

   if (rel > 0) {
      struct timespec ts;

      timespec_get(&ts, TIME_UTC);

      ts.tv_sec += rel / (1000*1000*1000);
      ts.tv_nsec += rel % (1000*1000*1000);
      if (ts.tv_nsec >= (1000*1000*1000)) {
         ts.tv_sec++;
         ts.tv_nsec -= (1000*1000*1000);
      }

      mtx_lock(&fence->mutex);
      while (!fence->signalled) {
         if (cnd_timedwait(&fence->cond, &fence->mutex, &ts) != thrd_success)
            break;
      }
      mtx_unlock(&fence->mutex);
   }

   return fence->signalled;
}

void
util_queue_fence_init(struct util_queue_fence *fence)
{
   memset(fence, 0, sizeof(*fence));
   (void) mtx_init(&fence->mutex, mtx_plain);
   cnd_init(&fence->cond);
   fence->signalled = true;
}

void
util_queue_fence_destroy(struct util_queue_fence *fence)
{
   assert(fence->signalled);

   /* Ensure that another thread is not in the middle of
    * util_queue_fence_signal (having set the fence to signalled but still
    * holding the fence mutex).
    *
    * A common contract between threads is that as soon as a fence is signalled
    * by thread A, thread B is allowed to destroy it. Since
    * util_queue_fence_is_signalled does not lock the fence mutex (for
    * performance reasons), we must do so here.
    */
   mtx_lock(&fence->mutex);
   mtx_unlock(&fence->mutex);

   cnd_destroy(&fence->cond);
   mtx_destroy(&fence->mutex);
}
#endif

/****************************************************************************
 * util_queue implementation
 */

struct thread_input {
   struct util_queue *queue;
   int thread_index;
};

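/* Worker thread main loop: pop jobs off the ring buffer, execute them and
 * signal their fences. When kill_threads is set, the loop exits and any
 * jobs still queued are signalled without being executed.
 */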
static int
util_queue_thread_func(void *input)
{
   struct util_queue *queue = ((struct thread_input*)input)->queue;
   int thread_index = ((struct thread_input*)input)->thread_index;

   free(input);

#ifdef HAVE_PTHREAD_SETAFFINITY
   if (queue->flags & UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY) {
      /* Don't inherit the thread affinity from the parent thread.
       * Set the full mask.
       */
      cpu_set_t cpuset;
      CPU_ZERO(&cpuset);
      for (unsigned i = 0; i < CPU_SETSIZE; i++)
         CPU_SET(i, &cpuset);

      pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
   }
#endif

   if (strlen(queue->name) > 0) {
      char name[16];
      util_snprintf(name, sizeof(name), "%s%i", queue->name, thread_index);
      u_thread_setname(name);
   }

   while (1) {
      struct util_queue_job job;

      mtx_lock(&queue->lock);
      assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

      /* wait if the queue is empty */
      while (!queue->kill_threads && queue->num_queued == 0)
         cnd_wait(&queue->has_queued_cond, &queue->lock);

      if (queue->kill_threads) {
         mtx_unlock(&queue->lock);
         break;
      }

      job = queue->jobs[queue->read_idx];
      memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;

      queue->num_queued--;
      cnd_signal(&queue->has_space_cond);
      mtx_unlock(&queue->lock);

      if (job.job) {
         job.execute(job.job, thread_index);
         util_queue_fence_signal(job.fence);
         if (job.cleanup)
            job.cleanup(job.job, thread_index);
      }
   }

   /* signal remaining jobs before terminating */
   mtx_lock(&queue->lock);
   for (unsigned i = queue->read_idx; i != queue->write_idx;
        i = (i + 1) % queue->max_jobs) {
      if (queue->jobs[i].job) {
         util_queue_fence_signal(queue->jobs[i].fence);
         queue->jobs[i].job = NULL;
      }
   }
   queue->read_idx = queue->write_idx;
   queue->num_queued = 0;
   mtx_unlock(&queue->lock);
   return 0;
}

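/* Spawn the worker thread with the given index and apply the queue's
 * scheduling flags to it. On failure, the thread_input is freed and false
 * is returned.
 */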
static bool
util_queue_create_thread(struct util_queue *queue, unsigned index)
{
   struct thread_input *input =
      (struct thread_input *) malloc(sizeof(struct thread_input));
   input->queue = queue;
   input->thread_index = index;

   queue->threads[index] = u_thread_create(util_queue_thread_func, input);

   if (!queue->threads[index]) {
      free(input);
      return false;
   }

   if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
#if defined(__linux__) && defined(SCHED_IDLE)
      struct sched_param sched_param = {0};

      /* The nice() function can only set a maximum of 19.
       * SCHED_IDLE is the same as nice = 20.
       *
       * Note that Linux only allows decreasing the priority. The original
       * priority can't be restored.
       */
      pthread_setschedparam(queue->threads[index], SCHED_IDLE, &sched_param);
#endif
   }
   return true;
}

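/* Illustrative usage sketch (not taken from a real caller; the callback and
 * job_data names are made up):
 *
 *    static void execute_cb(void *job, int thread_index) { ... }
 *
 *    struct util_queue queue;
 *    struct util_queue_fence fence;
 *
 *    util_queue_init(&queue, "compile", 32, 2, 0);
 *    util_queue_fence_init(&fence);
 *    util_queue_add_job(&queue, job_data, &fence, execute_cb, NULL);
 *    ...
 *    util_queue_fence_wait(&fence);
 *    util_queue_fence_destroy(&fence);
 *    util_queue_destroy(&queue);
 */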
bool
util_queue_init(struct util_queue *queue,
                const char *name,
                unsigned max_jobs,
                unsigned num_threads,
                unsigned flags)
{
   unsigned i;

   /* Form the thread name from process_name and name, limited to 13
    * characters. Characters 14-15 are reserved for the thread number.
    * Character 16 should be 0. Final form: "process:name12"
    *
    * If name is too long, it's truncated. If any space is left, the process
    * name fills it.
    */
   const char *process_name = util_get_process_name();
   int process_len = process_name ? strlen(process_name) : 0;
   int name_len = strlen(name);
   const int max_chars = sizeof(queue->name) - 1;

   name_len = MIN2(name_len, max_chars);

   /* See if there is any space left for the process name, reserve 1 for
    * the colon. */
   process_len = MIN2(process_len, max_chars - name_len - 1);
   process_len = MAX2(process_len, 0);

   memset(queue, 0, sizeof(*queue));

   if (process_len) {
      util_snprintf(queue->name, sizeof(queue->name), "%.*s:%s",
                    process_len, process_name, name);
   } else {
      util_snprintf(queue->name, sizeof(queue->name), "%s", name);
   }

   queue->flags = flags;
   queue->num_threads = num_threads;
   queue->max_jobs = max_jobs;

   queue->jobs = (struct util_queue_job*)
                 calloc(max_jobs, sizeof(struct util_queue_job));
   if (!queue->jobs)
      goto fail;

   (void) mtx_init(&queue->lock, mtx_plain);
   (void) mtx_init(&queue->finish_lock, mtx_plain);

   queue->num_queued = 0;
   cnd_init(&queue->has_queued_cond);
   cnd_init(&queue->has_space_cond);

   queue->threads = (thrd_t*) calloc(num_threads, sizeof(thrd_t));
   if (!queue->threads)
      goto fail;

   /* start threads */
   for (i = 0; i < num_threads; i++) {
      if (!util_queue_create_thread(queue, i)) {
         if (i == 0) {
            /* no threads created, fail */
            goto fail;
         } else {
            /* at least one thread created, so use it */
            queue->num_threads = i;
            break;
         }
      }
   }

   add_to_atexit_list(queue);
   return true;

fail:
   free(queue->threads);

   if (queue->jobs) {
      cnd_destroy(&queue->has_space_cond);
      cnd_destroy(&queue->has_queued_cond);
      mtx_destroy(&queue->lock);
      free(queue->jobs);
   }
   /* Zero the struct so that util_queue_is_initialized can also be used to
    * check for success. */
   memset(queue, 0, sizeof(*queue));
   return false;
}

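/* Ask all worker threads to exit and join them. Used by util_queue_destroy
 * and by the atexit handler.
 */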
static void
util_queue_killall_and_wait(struct util_queue *queue)
{
   unsigned i;

   /* Signal all threads to terminate. */
   mtx_lock(&queue->lock);
   queue->kill_threads = 1;
   cnd_broadcast(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);

   for (i = 0; i < queue->num_threads; i++)
      thrd_join(queue->threads[i], NULL);
   queue->num_threads = 0;
}

void
util_queue_destroy(struct util_queue *queue)
{
   util_queue_killall_and_wait(queue);
   remove_from_atexit_list(queue);

   cnd_destroy(&queue->has_space_cond);
   cnd_destroy(&queue->has_queued_cond);
   mtx_destroy(&queue->finish_lock);
   mtx_destroy(&queue->lock);
   free(queue->jobs);
   free(queue->threads);
}

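/* Add a job to the queue. If the ring buffer is full, either grow it
 * (UTIL_QUEUE_INIT_RESIZE_IF_FULL) or block until a worker frees a slot.
 * The fence is reset here and signalled once the job has been executed
 * (or dropped).
 */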
void
util_queue_add_job(struct util_queue *queue,
                   void *job,
                   struct util_queue_fence *fence,
                   util_queue_execute_func execute,
                   util_queue_execute_func cleanup)
{
   struct util_queue_job *ptr;

   mtx_lock(&queue->lock);
   if (queue->kill_threads) {
      mtx_unlock(&queue->lock);
      /* There's no good option here, but any leaks will be short-lived,
       * since things are shutting down anyway.
       */
      return;
   }

   util_queue_fence_reset(fence);

   assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

   if (queue->num_queued == queue->max_jobs) {
      if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL) {
         /* If the queue is full, make it larger to avoid waiting for a free
          * slot.
          */
         unsigned new_max_jobs = queue->max_jobs + 8;
         struct util_queue_job *jobs =
            (struct util_queue_job*)calloc(new_max_jobs,
                                           sizeof(struct util_queue_job));
         assert(jobs);

         /* Copy all queued jobs into the new list. */
         unsigned num_jobs = 0;
         unsigned i = queue->read_idx;

         do {
            jobs[num_jobs++] = queue->jobs[i];
            i = (i + 1) % queue->max_jobs;
         } while (i != queue->write_idx);

         assert(num_jobs == queue->num_queued);

         free(queue->jobs);
         queue->jobs = jobs;
         queue->read_idx = 0;
         queue->write_idx = num_jobs;
         queue->max_jobs = new_max_jobs;
      } else {
         /* Wait until there is a free slot. */
         while (queue->num_queued == queue->max_jobs)
            cnd_wait(&queue->has_space_cond, &queue->lock);
      }
   }

   ptr = &queue->jobs[queue->write_idx];
   assert(ptr->job == NULL);
   ptr->job = job;
   ptr->fence = fence;
   ptr->execute = execute;
   ptr->cleanup = cleanup;
   queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;

   queue->num_queued++;
   cnd_signal(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);
}

/**
 * Remove a queued job. If the job hasn't started execution, it's removed from
 * the queue. If the job has started execution, the function waits for it to
 * complete.
 *
 * In all cases, the fence is signalled when the function returns.
 *
 * The function can be used when destroying an object associated with the job
 * when you don't care about the job completion state.
 */
void
util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence)
{
   bool removed = false;

   if (util_queue_fence_is_signalled(fence))
      return;

   mtx_lock(&queue->lock);
   for (unsigned i = queue->read_idx; i != queue->write_idx;
        i = (i + 1) % queue->max_jobs) {
      if (queue->jobs[i].fence == fence) {
         if (queue->jobs[i].cleanup)
            queue->jobs[i].cleanup(queue->jobs[i].job, -1);

         /* Just clear it. The threads will treat it as a no-op job. */
         memset(&queue->jobs[i], 0, sizeof(queue->jobs[i]));
         removed = true;
         break;
      }
   }
   mtx_unlock(&queue->lock);

   if (removed)
      util_queue_fence_signal(fence);
   else
      util_queue_fence_wait(fence);
}

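/* No-op job used by util_queue_finish: each worker thread executes one of
 * these and blocks on the shared barrier until every thread has reached it.
 */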
static void
util_queue_finish_execute(void *data, int num_thread)
{
   util_barrier *barrier = data;
   util_barrier_wait(barrier);
}

/**
 * Wait until all previously added jobs have completed.
 */
void
util_queue_finish(struct util_queue *queue)
{
   util_barrier barrier;
   struct util_queue_fence *fences = malloc(queue->num_threads * sizeof(*fences));

   util_barrier_init(&barrier, queue->num_threads);

   /* If two threads were adding jobs for two different barriers at the same
    * time, a deadlock would happen, because one barrier requires that all
    * threads wait for it exclusively.
    */
   mtx_lock(&queue->finish_lock);

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_init(&fences[i]);
      util_queue_add_job(queue, &barrier, &fences[i], util_queue_finish_execute, NULL);
   }

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_wait(&fences[i]);
      util_queue_fence_destroy(&fences[i]);
   }
   mtx_unlock(&queue->finish_lock);

   util_barrier_destroy(&barrier);

   free(fences);
}

int64_t
util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
{
   /* Allow some flexibility by not raising an error. */
   if (thread_index >= queue->num_threads)
      return 0;

   return u_thread_get_time_nano(queue->threads[thread_index]);
}