/*
 * Copyright © 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "u_queue.h"

#include "c11/threads.h"

#include "util/os_time.h"
#include "util/u_string.h"
#include "util/u_thread.h"
#include "u_process.h"

#if defined(__linux__)
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#endif

/* Cap automatic growth of the job queue at 256 MB of queued job data. */
#define S_256MB (256 * 1024 * 1024)

static void
util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
                        bool finish_locked);

/****************************************************************************
 * Wait for all queues to assert idle when exit() is called.
 *
 * Otherwise, C++ static variable destructors can be called while threads
 * are using the static variables.
 */

static once_flag atexit_once_flag = ONCE_FLAG_INIT;
static struct list_head queue_list;
static mtx_t exit_mutex = _MTX_INITIALIZER_NP;

static void
atexit_handler(void)
{
   struct util_queue *iter;

   mtx_lock(&exit_mutex);
   /* Wait for all queues to assert idle. */
   LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
      util_queue_kill_threads(iter, 0, false);
   }
   mtx_unlock(&exit_mutex);
}

static void
global_init(void)
{
   list_inithead(&queue_list);
   atexit(atexit_handler);
}

static void
add_to_atexit_list(struct util_queue *queue)
{
   call_once(&atexit_once_flag, global_init);

   mtx_lock(&exit_mutex);
   list_add(&queue->head, &queue_list);
   mtx_unlock(&exit_mutex);
}

static void
remove_from_atexit_list(struct util_queue *queue)
{
   struct util_queue *iter, *tmp;

   mtx_lock(&exit_mutex);
   LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
      if (iter == queue) {
         list_del(&iter->head);
         break;
      }
   }
   mtx_unlock(&exit_mutex);
}

/****************************************************************************
 * util_queue_fence
 */

#ifdef UTIL_QUEUE_FENCE_FUTEX
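/* The futex-based fence encodes its state in fence->val:
 *   0 = signalled, 1 = unsignalled, 2 = unsignalled with waiters.
 * Waiters upgrade the value from 1 to 2 before sleeping on the futex, so
 * the signalling side only has to issue a futex wake when it sees 2.
 */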
static bool
do_futex_fence_wait(struct util_queue_fence *fence,
                    bool timeout, int64_t abs_timeout)
{
   uint32_t v = fence->val;
   struct timespec ts;
   ts.tv_sec = abs_timeout / (1000*1000*1000);
   ts.tv_nsec = abs_timeout % (1000*1000*1000);

   while (v != 0) {
      if (v != 2) {
         v = p_atomic_cmpxchg(&fence->val, 1, 2);
         if (v == 0)
            return true;
      }

      int r = futex_wait(&fence->val, 2, timeout ? &ts : NULL);
      if (timeout && r < 0) {
         if (errno == ETIMEDOUT)
            return false;
      }

      v = fence->val;
   }

   return true;
}

void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   do_futex_fence_wait(fence, false, 0);
}

bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   return do_futex_fence_wait(fence, true, abs_timeout);
}

#endif

#ifdef UTIL_QUEUE_FENCE_STANDARD
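/* Fallback fence implementation for platforms without futexes: a plain
 * mutex + condition variable pair protecting the "signalled" flag.
 */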
void
util_queue_fence_signal(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   fence->signalled = true;
   cnd_broadcast(&fence->cond);
   mtx_unlock(&fence->mutex);
}

void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   while (!fence->signalled)
      cnd_wait(&fence->cond, &fence->mutex);
   mtx_unlock(&fence->mutex);
}

bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   /* This terrible hack is made necessary by the fact that we really want an
    * internal interface consistent with os_time_*, but cnd_timedwait is spec'd
    * to be relative to the TIME_UTC clock.
    */
   int64_t rel = abs_timeout - os_time_get_nano();

   if (rel > 0) {
      struct timespec ts;

      timespec_get(&ts, TIME_UTC);

      ts.tv_sec += rel / (1000*1000*1000);
      ts.tv_nsec += rel % (1000*1000*1000);
      if (ts.tv_nsec >= (1000*1000*1000)) {
         ts.tv_sec++;
         ts.tv_nsec -= (1000*1000*1000);
      }

      mtx_lock(&fence->mutex);
      while (!fence->signalled) {
         if (cnd_timedwait(&fence->cond, &fence->mutex, &ts) != thrd_success)
            break;
      }
      mtx_unlock(&fence->mutex);
   }

   return fence->signalled;
}

void
util_queue_fence_init(struct util_queue_fence *fence)
{
   memset(fence, 0, sizeof(*fence));
   (void) mtx_init(&fence->mutex, mtx_plain);
   cnd_init(&fence->cond);
   fence->signalled = true;
}

void
util_queue_fence_destroy(struct util_queue_fence *fence)
{
   assert(fence->signalled);

   /* Ensure that another thread is not in the middle of
    * util_queue_fence_signal (having set the fence to signalled but still
    * holding the fence mutex).
    *
    * A common contract between threads is that as soon as a fence is signalled
    * by thread A, thread B is allowed to destroy it. Since
    * util_queue_fence_is_signalled does not lock the fence mutex (for
    * performance reasons), we must do so here.
    */
   mtx_lock(&fence->mutex);
   mtx_unlock(&fence->mutex);

   cnd_destroy(&fence->cond);
   mtx_destroy(&fence->mutex);
}
#endif

/****************************************************************************
 * util_queue implementation
 */

struct thread_input {
   struct util_queue *queue;
   int thread_index;
};

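/* Worker thread main loop: pop jobs from the ring buffer (read_idx) while
 * holding queue->lock, execute them without the lock, and signal their
 * fences. A thread exits as soon as its index is >= queue->num_threads,
 * which is how util_queue_kill_threads terminates threads.
 */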
static int
util_queue_thread_func(void *input)
{
   struct util_queue *queue = ((struct thread_input*)input)->queue;
   int thread_index = ((struct thread_input*)input)->thread_index;

   free(input);

#ifdef HAVE_PTHREAD_SETAFFINITY
   if (queue->flags & UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY) {
      /* Don't inherit the thread affinity from the parent thread.
       * Set the full mask.
       */
      cpu_set_t cpuset;
      CPU_ZERO(&cpuset);
      for (unsigned i = 0; i < CPU_SETSIZE; i++)
         CPU_SET(i, &cpuset);

      pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
   }
#endif

#if defined(__linux__)
   if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
      /* The nice() function can only set a maximum of 19. */
      setpriority(PRIO_PROCESS, syscall(SYS_gettid), 19);
   }
#endif

   if (strlen(queue->name) > 0) {
      char name[16];
      snprintf(name, sizeof(name), "%s%i", queue->name, thread_index);
      u_thread_setname(name);
   }

   while (1) {
      struct util_queue_job job;

      mtx_lock(&queue->lock);
      assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

      /* wait if the queue is empty */
      while (thread_index < queue->num_threads && queue->num_queued == 0)
         cnd_wait(&queue->has_queued_cond, &queue->lock);

      /* only kill threads that are above "num_threads" */
      if (thread_index >= queue->num_threads) {
         mtx_unlock(&queue->lock);
         break;
      }

      job = queue->jobs[queue->read_idx];
      memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;

      queue->num_queued--;
      cnd_signal(&queue->has_space_cond);
      if (job.job)
         queue->total_jobs_size -= job.job_size;
      mtx_unlock(&queue->lock);

      if (job.job) {
         job.execute(job.job, thread_index);
         util_queue_fence_signal(job.fence);
         if (job.cleanup)
            job.cleanup(job.job, thread_index);
      }
   }

   /* signal remaining jobs if all threads are being terminated */
   mtx_lock(&queue->lock);
   if (queue->num_threads == 0) {
      for (unsigned i = queue->read_idx; i != queue->write_idx;
           i = (i + 1) % queue->max_jobs) {
         if (queue->jobs[i].job) {
            util_queue_fence_signal(queue->jobs[i].fence);
            queue->jobs[i].job = NULL;
         }
      }
      queue->read_idx = queue->write_idx;
      queue->num_queued = 0;
   }
   mtx_unlock(&queue->lock);
   return 0;
}

static bool
util_queue_create_thread(struct util_queue *queue, unsigned index)
{
   struct thread_input *input =
      (struct thread_input *) malloc(sizeof(struct thread_input));
   if (!input)
      return false;

   input->queue = queue;
   input->thread_index = index;

   queue->threads[index] = u_thread_create(util_queue_thread_func, input);

   if (!queue->threads[index]) {
      free(input);
      return false;
   }

   if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
#if defined(__linux__) && defined(SCHED_BATCH)
      struct sched_param sched_param = {0};

      /* The nice() function can only set a maximum of 19.
       * SCHED_BATCH gives the scheduler a hint that this is a latency
       * insensitive thread.
       *
       * Note that Linux only allows decreasing the priority. The original
       * priority can't be restored.
       */
      pthread_setschedparam(queue->threads[index], SCHED_BATCH, &sched_param);
#endif
   }
   return true;
}

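/* Change the number of active worker threads at runtime. The value is
 * clamped to [1, max_threads]; extra threads are joined when shrinking and
 * new ones are spawned when growing.
 */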
void
util_queue_adjust_num_threads(struct util_queue *queue, unsigned num_threads)
{
   num_threads = MIN2(num_threads, queue->max_threads);
   num_threads = MAX2(num_threads, 1);

   mtx_lock(&queue->finish_lock);
   unsigned old_num_threads = queue->num_threads;

   if (num_threads == old_num_threads) {
      mtx_unlock(&queue->finish_lock);
      return;
   }

   if (num_threads < old_num_threads) {
      util_queue_kill_threads(queue, num_threads, true);
      mtx_unlock(&queue->finish_lock);
      return;
   }

   /* Create threads.
    *
    * We need to update num_threads first, because a new thread terminates
    * as soon as its thread_index >= num_threads.
    */
   queue->num_threads = num_threads;
   for (unsigned i = old_num_threads; i < num_threads; i++) {
      if (!util_queue_create_thread(queue, i))
         break;
   }
   mtx_unlock(&queue->finish_lock);
}

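/* Initialize the queue and start its worker threads.
 *
 * Typical usage, as an illustrative sketch (the job type, its fields, and
 * the execute callback below are hypothetical, not part of this API):
 *
 *    struct add_job { int a, b, result; };
 *
 *    static void add_execute(void *job, int thread_index)
 *    {
 *       struct add_job *j = (struct add_job *)job;
 *       j->result = j->a + j->b;
 *    }
 *
 *    struct util_queue queue;
 *    struct util_queue_fence fence;
 *    struct add_job job = { .a = 1, .b = 2 };
 *
 *    if (util_queue_init(&queue, "add", 32, 2, 0)) {
 *       util_queue_fence_init(&fence);
 *       util_queue_add_job(&queue, &job, &fence, add_execute, NULL, 0);
 *       util_queue_fence_wait(&fence);
 *       util_queue_fence_destroy(&fence);
 *       util_queue_destroy(&queue);
 *    }
 */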
bool
util_queue_init(struct util_queue *queue,
                const char *name,
                unsigned max_jobs,
                unsigned num_threads,
                unsigned flags)
{
   unsigned i;

   /* Form the thread name from process_name and name, limited to 13
    * characters. Characters 14-15 are reserved for the thread number.
    * Character 16 should be 0. Final form: "process:name12"
    *
    * If name is too long, it's truncated. If any space is left, the process
    * name fills it.
    */
   const char *process_name = util_get_process_name();
   int process_len = process_name ? strlen(process_name) : 0;
   int name_len = strlen(name);
   const int max_chars = sizeof(queue->name) - 1;

   name_len = MIN2(name_len, max_chars);

   /* See if there is any space left for the process name, reserve 1 for
    * the colon. */
   process_len = MIN2(process_len, max_chars - name_len - 1);
   process_len = MAX2(process_len, 0);

   memset(queue, 0, sizeof(*queue));

   if (process_len) {
      snprintf(queue->name, sizeof(queue->name), "%.*s:%s",
               process_len, process_name, name);
   } else {
      snprintf(queue->name, sizeof(queue->name), "%s", name);
   }

   queue->flags = flags;
   queue->max_threads = num_threads;
   queue->num_threads = num_threads;
   queue->max_jobs = max_jobs;

   queue->jobs = (struct util_queue_job*)
                 calloc(max_jobs, sizeof(struct util_queue_job));
   if (!queue->jobs)
      goto fail;

   (void) mtx_init(&queue->lock, mtx_plain);
   (void) mtx_init(&queue->finish_lock, mtx_plain);

   queue->num_queued = 0;
   cnd_init(&queue->has_queued_cond);
   cnd_init(&queue->has_space_cond);

   queue->threads = (thrd_t*) calloc(num_threads, sizeof(thrd_t));
   if (!queue->threads)
      goto fail;

   /* start threads */
   for (i = 0; i < num_threads; i++) {
      if (!util_queue_create_thread(queue, i)) {
         if (i == 0) {
            /* no threads created, fail */
            goto fail;
         } else {
            /* at least one thread created, so use it */
            queue->num_threads = i;
            break;
         }
      }
   }

   add_to_atexit_list(queue);
   return true;

fail:
   free(queue->threads);

   if (queue->jobs) {
      cnd_destroy(&queue->has_space_cond);
      cnd_destroy(&queue->has_queued_cond);
      mtx_destroy(&queue->lock);
      free(queue->jobs);
   }
   /* Zero the queue so that util_queue_is_initialized() can also be used to
    * check for success. */
   memset(queue, 0, sizeof(*queue));
   return false;
}

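/* Terminate all worker threads above "keep_num_threads" and join them.
 * Callers that already hold queue->finish_lock pass finish_locked = true.
 */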
static void
util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
                        bool finish_locked)
{
   unsigned i;

   /* Signal all threads to terminate. */
   if (!finish_locked)
      mtx_lock(&queue->finish_lock);

   if (keep_num_threads >= queue->num_threads) {
      if (!finish_locked)
         mtx_unlock(&queue->finish_lock);
      return;
   }

   mtx_lock(&queue->lock);
   unsigned old_num_threads = queue->num_threads;
   /* Setting num_threads is what causes the threads to terminate.
    * Then cnd_broadcast wakes them up and they will exit their function.
    */
   queue->num_threads = keep_num_threads;
   cnd_broadcast(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);

   for (i = keep_num_threads; i < old_num_threads; i++)
      thrd_join(queue->threads[i], NULL);

   if (!finish_locked)
      mtx_unlock(&queue->finish_lock);
}

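/* Join all worker threads and free the queue's resources. Jobs still in the
 * queue are not executed; their fences are signalled by the exiting threads.
 */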
void
util_queue_destroy(struct util_queue *queue)
{
   util_queue_kill_threads(queue, 0, false);
   remove_from_atexit_list(queue);

   cnd_destroy(&queue->has_space_cond);
   cnd_destroy(&queue->has_queued_cond);
   mtx_destroy(&queue->finish_lock);
   mtx_destroy(&queue->lock);
   free(queue->jobs);
   free(queue->threads);
}

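/* Add a job to be executed by a worker thread.
 *
 * "fence" is reset here and signalled once the job has been executed.
 * If the queue is full, this either grows the ring buffer (when
 * UTIL_QUEUE_INIT_RESIZE_IF_FULL is set and less than 256 MB of job data is
 * queued) or blocks until a slot becomes free. "job_size" is only used for
 * that bookkeeping and may be 0.
 */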
void
util_queue_add_job(struct util_queue *queue,
                   void *job,
                   struct util_queue_fence *fence,
                   util_queue_execute_func execute,
                   util_queue_execute_func cleanup,
                   const size_t job_size)
{
   struct util_queue_job *ptr;

   mtx_lock(&queue->lock);
   if (queue->num_threads == 0) {
      mtx_unlock(&queue->lock);
      /* There's no good option here, but any leaks will be short-lived,
       * as things are shutting down.
       */
      return;
   }

   util_queue_fence_reset(fence);

   assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

   if (queue->num_queued == queue->max_jobs) {
      if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL &&
          queue->total_jobs_size + job_size < S_256MB) {
         /* If the queue is full, make it larger to avoid waiting for a free
          * slot.
          */
         unsigned new_max_jobs = queue->max_jobs + 8;
         struct util_queue_job *jobs =
            (struct util_queue_job*)calloc(new_max_jobs,
                                           sizeof(struct util_queue_job));
         assert(jobs);

         /* Copy all queued jobs into the new list. */
         unsigned num_jobs = 0;
         unsigned i = queue->read_idx;

         do {
            jobs[num_jobs++] = queue->jobs[i];
            i = (i + 1) % queue->max_jobs;
         } while (i != queue->write_idx);

         assert(num_jobs == queue->num_queued);

         free(queue->jobs);
         queue->jobs = jobs;
         queue->read_idx = 0;
         queue->write_idx = num_jobs;
         queue->max_jobs = new_max_jobs;
      } else {
         /* Wait until there is a free slot. */
         while (queue->num_queued == queue->max_jobs)
            cnd_wait(&queue->has_space_cond, &queue->lock);
      }
   }

   ptr = &queue->jobs[queue->write_idx];
   assert(ptr->job == NULL);
   ptr->job = job;
   ptr->fence = fence;
   ptr->execute = execute;
   ptr->cleanup = cleanup;
   ptr->job_size = job_size;

   queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
   queue->total_jobs_size += ptr->job_size;

   queue->num_queued++;
   cnd_signal(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);
}

/**
 * Remove a queued job. If the job hasn't started execution, it's removed from
 * the queue. If the job has started execution, the function waits for it to
 * complete.
 *
 * In all cases, the fence is signalled when the function returns.
 *
 * The function can be used when destroying an object associated with the job
 * when you don't care about the job completion state.
 */
void
util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence)
{
   bool removed = false;

   if (util_queue_fence_is_signalled(fence))
      return;

   mtx_lock(&queue->lock);
   for (unsigned i = queue->read_idx; i != queue->write_idx;
        i = (i + 1) % queue->max_jobs) {
      if (queue->jobs[i].fence == fence) {
         if (queue->jobs[i].cleanup)
            queue->jobs[i].cleanup(queue->jobs[i].job, -1);

         /* Just clear it. The threads will treat it as a no-op job. */
         memset(&queue->jobs[i], 0, sizeof(queue->jobs[i]));
         removed = true;
         break;
      }
   }
   mtx_unlock(&queue->lock);

   if (removed)
      util_queue_fence_signal(fence);
   else
      util_queue_fence_wait(fence);
}

static void
util_queue_finish_execute(void *data, int num_thread)
{
   util_barrier *barrier = data;
   util_barrier_wait(barrier);
}

/**
 * Wait until all previously added jobs have completed.
 */
void
util_queue_finish(struct util_queue *queue)
{
   util_barrier barrier;
   struct util_queue_fence *fences;

   /* If 2 threads were adding jobs for 2 different barriers at the same time,
    * a deadlock would happen, because 1 barrier requires that all threads
    * wait for it exclusively.
    */
   mtx_lock(&queue->finish_lock);

   /* The number of threads can be changed to 0, e.g. by the atexit handler. */
   if (!queue->num_threads) {
      mtx_unlock(&queue->finish_lock);
      return;
   }

   fences = malloc(queue->num_threads * sizeof(*fences));
   util_barrier_init(&barrier, queue->num_threads);

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_init(&fences[i]);
      util_queue_add_job(queue, &barrier, &fences[i],
                         util_queue_finish_execute, NULL, 0);
   }

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_wait(&fences[i]);
      util_queue_fence_destroy(&fences[i]);
   }
   mtx_unlock(&queue->finish_lock);

   util_barrier_destroy(&barrier);

   free(fences);
}

int64_t
util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
{
   /* Allow some flexibility by not raising an error. */
   if (thread_index >= queue->num_threads)
      return 0;

   return u_thread_get_time_nano(queue->threads[thread_index]);
}