[mesa.git] / src / gallium / drivers / llvmpipe / lp_cs_tpool.c
/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

/**
 * Compute shader thread pool.
 * Based on threadpool.c, but heavily modified and tuned for compute shaders.
 */
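
/*
 * A minimal usage sketch (illustrative only, not part of this file): the
 * caller creates a pool, queues a task whose work callback is invoked once
 * per iteration, waits for all iterations to complete, and finally destroys
 * the pool.  The names example_work, example_data and ed below are
 * hypothetical; see lp_cs_tpool.h for the exact lp_cs_tpool_task_func
 * callback signature.
 *
 *    static void
 *    example_work(void *data, int iter_idx, struct lp_cs_local_mem *lmem)
 *    {
 *       struct example_data *ed = data;
 *       ed->results[iter_idx] = iter_idx * 2;
 *    }
 *
 *    struct lp_cs_tpool *pool = lp_cs_tpool_create(4);
 *    struct lp_cs_tpool_task *task =
 *       lp_cs_tpool_queue_task(pool, example_work, &ed, 64);
 *    lp_cs_tpool_wait_for_task(pool, &task);
 *    lp_cs_tpool_destroy(pool);
 */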

#include "util/u_thread.h"
#include "util/u_memory.h"
#include "lp_cs_tpool.h"

/* Worker thread main loop: pull iterations off the shared work queue. */
static int
lp_cs_tpool_worker(void *data)
{
   struct lp_cs_tpool *pool = data;
   struct lp_cs_local_mem lmem;

   memset(&lmem, 0, sizeof(lmem));
   mtx_lock(&pool->m);

   while (!pool->shutdown) {
      struct lp_cs_tpool_task *task;

      /* Sleep until work is queued or the pool is shutting down. */
      while (list_empty(&pool->workqueue) && !pool->shutdown)
         cnd_wait(&pool->new_work, &pool->m);

      if (pool->shutdown)
         break;

      /* Claim the next iteration of the task at the head of the queue;
       * once the last iteration has been handed out, drop the task from
       * the queue.
       */
      task = list_first_entry(&pool->workqueue, struct lp_cs_tpool_task,
                              list);
      unsigned this_iter = task->iter_start++;

      if (task->iter_start == task->iter_total)
         list_del(&task->list);

      /* Run the work callback with the pool mutex dropped. */
      mtx_unlock(&pool->m);
      task->work(task->data, this_iter, &lmem);
      mtx_lock(&pool->m);
      task->iter_finished++;
      if (task->iter_finished == task->iter_total)
         cnd_broadcast(&task->finish);
   }
   mtx_unlock(&pool->m);
   FREE(lmem.local_mem_ptr);
   return 0;
}

/**
 * Create a pool of num_threads worker threads.
 */
struct lp_cs_tpool *
lp_cs_tpool_create(unsigned num_threads)
{
   struct lp_cs_tpool *pool = CALLOC_STRUCT(lp_cs_tpool);

   if (!pool)
      return NULL;

   (void) mtx_init(&pool->m, mtx_plain);
   cnd_init(&pool->new_work);

   list_inithead(&pool->workqueue);
   assert (num_threads <= LP_MAX_THREADS);
   pool->num_threads = num_threads;
   for (unsigned i = 0; i < num_threads; i++)
      pool->threads[i] = u_thread_create(lp_cs_tpool_worker, pool);
   return pool;
}

/**
 * Tell the workers to shut down, join them, and free the pool.
 */
void
lp_cs_tpool_destroy(struct lp_cs_tpool *pool)
{
   if (!pool)
      return;

   mtx_lock(&pool->m);
   pool->shutdown = true;
   cnd_broadcast(&pool->new_work);
   mtx_unlock(&pool->m);

   for (unsigned i = 0; i < pool->num_threads; i++) {
      thrd_join(pool->threads[i], NULL);
   }

   cnd_destroy(&pool->new_work);
   mtx_destroy(&pool->m);
   FREE(pool);
}

/**
 * Queue a task that runs the work callback num_iters times.
 *
 * With no worker threads, the iterations are run synchronously on the
 * calling thread and NULL is returned (there is nothing to wait on).
 * Otherwise a task handle is returned, which must be passed to
 * lp_cs_tpool_wait_for_task() to wait for completion and free it.
 */
struct lp_cs_tpool_task *
lp_cs_tpool_queue_task(struct lp_cs_tpool *pool,
                       lp_cs_tpool_task_func work, void *data, int num_iters)
{
   struct lp_cs_tpool_task *task;

   if (pool->num_threads == 0) {
      struct lp_cs_local_mem lmem;

      memset(&lmem, 0, sizeof(lmem));
      for (unsigned t = 0; t < num_iters; t++) {
         work(data, t, &lmem);
      }
      return NULL;
   }
   task = CALLOC_STRUCT(lp_cs_tpool_task);
   if (!task) {
      return NULL;
   }

   task->work = work;
   task->data = data;
   task->iter_total = num_iters;
   cnd_init(&task->finish);

   mtx_lock(&pool->m);

   list_addtail(&task->list, &pool->workqueue);

   cnd_signal(&pool->new_work);
   mtx_unlock(&pool->m);
   return task;
}

/**
 * Wait for all iterations of a queued task to finish, then free the task
 * and clear the caller's handle.
 */
void
lp_cs_tpool_wait_for_task(struct lp_cs_tpool *pool,
                          struct lp_cs_tpool_task **task_handle)
{
   struct lp_cs_tpool_task *task = *task_handle;

   if (!pool || !task)
      return;

   mtx_lock(&pool->m);
   while (task->iter_finished < task->iter_total)
      cnd_wait(&task->finish, &pool->m);
   mtx_unlock(&pool->m);

   cnd_destroy(&task->finish);
   FREE(task);
   *task_handle = NULL;
}