c4d3f4a434917e0a57a9f83381778bb3627775e2
[mesa.git] / src / mesa / main / glthread.c
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file glthread.c
25 *
26 * Support functions for the glthread feature of Mesa.
27 *
28 * In multicore systems, many applications end up CPU-bound with about half
29 * their time spent inside their rendering thread and half inside Mesa. To
30 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
31 * quickly logs the GL commands to a buffer to be processed by a worker
32 * thread.
33 */
34
35 #include "main/mtypes.h"
36 #include "main/glthread.h"
37 #include "main/marshal.h"
38 #include "main/marshal_generated.h"
39 #include "util/u_thread.h"
40
41 #ifdef HAVE_PTHREAD
42
43 static void
44 glthread_allocate_batch(struct gl_context *ctx)
45 {
46 struct glthread_state *glthread = ctx->GLThread;
47
48 /* TODO: handle memory allocation failure. */
49 glthread->batch = malloc(sizeof(*glthread->batch));
50 if (!glthread->batch)
51 return;
52 memset(glthread->batch, 0, offsetof(struct glthread_batch, buffer));
53 }
54
55 static void
56 glthread_unmarshal_batch(struct gl_context *ctx, struct glthread_batch *batch,
57 const bool release_batch)
58 {
59 size_t pos = 0;
60
61 _glapi_set_dispatch(ctx->CurrentServerDispatch);
62
63 while (pos < batch->used)
64 pos += _mesa_unmarshal_dispatch_cmd(ctx, &batch->buffer[pos]);
65
66 assert(pos == batch->used);
67
68 if (release_batch)
69 free(batch);
70 else
71 batch->used = 0;
72 }
73
/* Worker thread main loop: pops batches off the queue and executes them
 * until shutdown is requested and the queue is drained.
 */
static void *
glthread_worker(void *data)
{
   struct gl_context *ctx = data;
   struct glthread_state *glthread = ctx->GLThread;

   /* Bind this context to the worker thread via the driver hook, and make
    * it current in glapi so the replayed GL calls execute here. */
   ctx->Driver.SetBackgroundContext(ctx);
   _glapi_set_context(ctx);

   u_thread_setname("mesa_glthread");

   pthread_mutex_lock(&glthread->mutex);

   while (true) {
      struct glthread_batch *batch;

      /* Block (dropping the lock) until new work arrives for us. */
      while (!glthread->batch_queue && !glthread->shutdown) {
         /* Queue drained: wake anyone sleeping in _mesa_glthread_finish()
          * before we go idle. */
         pthread_cond_broadcast(&glthread->work_done);
         pthread_cond_wait(&glthread->new_work, &glthread->mutex);
      }

      batch = glthread->batch_queue;

      if (glthread->shutdown && !batch) {
         /* Shutdown requested and nothing left to run: signal waiters one
          * last time and exit with the mutex released. */
         pthread_cond_broadcast(&glthread->work_done);
         pthread_mutex_unlock(&glthread->mutex);
         return NULL;
      }
      /* Pop the head of the singly-linked queue; if we removed the last
       * element, point the tail back at the (now empty) head pointer. */
      glthread->batch_queue = batch->next;
      if (glthread->batch_queue_tail == &batch->next)
         glthread->batch_queue_tail = &glthread->batch_queue;

      /* Execute outside the lock so the app thread can keep queueing.
       * 'busy' tells _mesa_glthread_finish() there is in-flight work even
       * though the queue may look empty. */
      glthread->busy = true;
      pthread_mutex_unlock(&glthread->mutex);

      glthread_unmarshal_batch(ctx, batch, true);

      pthread_mutex_lock(&glthread->mutex);
      glthread->busy = false;
   }

   /* UNREACHED */
   return NULL;
}
119
120 void
121 _mesa_glthread_init(struct gl_context *ctx)
122 {
123 struct glthread_state *glthread = calloc(1, sizeof(*glthread));
124
125 if (!glthread)
126 return;
127
128 ctx->MarshalExec = _mesa_create_marshal_table(ctx);
129 if (!ctx->MarshalExec) {
130 free(glthread);
131 return;
132 }
133
134 ctx->CurrentClientDispatch = ctx->MarshalExec;
135
136 pthread_mutex_init(&glthread->mutex, NULL);
137 pthread_cond_init(&glthread->new_work, NULL);
138 pthread_cond_init(&glthread->work_done, NULL);
139
140 glthread->batch_queue_tail = &glthread->batch_queue;
141 ctx->GLThread = glthread;
142
143 glthread_allocate_batch(ctx);
144
145 pthread_create(&glthread->thread, NULL, glthread_worker, ctx);
146 }
147
/* Tear down glthread for @ctx: drain all queued work, join the worker
 * thread, free all glthread state, and restore the real dispatch table.
 */
void
_mesa_glthread_destroy(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   if (!glthread)
      return;

   /* Submit whatever is still sitting in the current batch. */
   _mesa_glthread_flush_batch(ctx);

   /* Tell the worker to exit once the queue is empty. */
   pthread_mutex_lock(&glthread->mutex);
   glthread->shutdown = true;
   pthread_cond_broadcast(&glthread->new_work);
   pthread_mutex_unlock(&glthread->mutex);

   /* Since this waits for the thread to exit, it means that all queued work
    * will have been completed.
    */
   pthread_join(glthread->thread, NULL);

   pthread_cond_destroy(&glthread->new_work);
   pthread_cond_destroy(&glthread->work_done);
   pthread_mutex_destroy(&glthread->mutex);

   /* Due to the join above, there should be one empty batch allocated at this
    * point, and no batches queued.
    */
   assert(!glthread->batch->used);
   assert(!glthread->batch->next);
   free(glthread->batch);
   assert(!glthread->batch_queue);

   free(glthread);
   ctx->GLThread = NULL;

   /* Put the real dispatch back, unless another context already installed
    * its own table (see _mesa_glthread_restore_dispatch). */
   _mesa_glthread_restore_dispatch(ctx);
}
185
186 void
187 _mesa_glthread_restore_dispatch(struct gl_context *ctx)
188 {
189 /* Remove ourselves from the dispatch table except if another ctx/thread
190 * already installed a new dispatch table.
191 *
192 * Typically glxMakeCurrent will bind a new context (install new table) then
193 * old context might be deleted.
194 */
195 if (_glapi_get_dispatch() == ctx->MarshalExec) {
196 ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
197 _glapi_set_dispatch(ctx->CurrentClientDispatch);
198 }
199 }
200
/* Queue the current batch for execution by the worker thread and allocate
 * a fresh one in its place.  Caller must hold glthread->mutex.
 */
static void
_mesa_glthread_flush_batch_locked(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;
   struct glthread_batch *batch = glthread->batch;

   /* An empty batch needs no submission. */
   if (!batch->used)
      return;

   /* Immediately reallocate a new batch, since the next marshalled call would
    * just do it.
    */
   glthread_allocate_batch(ctx);

   /* Debug: execute the batch immediately from this thread.
    *
    * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
    * need to restore it when it returns.
    */
   if (false) {
      glthread_unmarshal_batch(ctx, batch, true);
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
      return;
   }

   /* Append to the tail of the singly-linked queue and wake the worker. */
   *glthread->batch_queue_tail = batch;
   glthread->batch_queue_tail = &batch->next;
   pthread_cond_broadcast(&glthread->new_work);
}
230
231 void
232 _mesa_glthread_flush_batch(struct gl_context *ctx)
233 {
234 struct glthread_state *glthread = ctx->GLThread;
235 struct glthread_batch *batch;
236
237 if (!glthread)
238 return;
239
240 batch = glthread->batch;
241 if (!batch->used)
242 return;
243
244 pthread_mutex_lock(&glthread->mutex);
245 _mesa_glthread_flush_batch_locked(ctx);
246 pthread_mutex_unlock(&glthread->mutex);
247 }
248
/**
 * Waits until all pending batches have been unmarshaled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   if (!glthread)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * dri interface entrypoints), in which case we don't need to actually
    * synchronize against ourself.
    */
   if (pthread_self() == glthread->thread)
      return;

   pthread_mutex_lock(&glthread->mutex);

   if (!(glthread->batch_queue || glthread->busy)) {
      /* Worker is idle: run any unsubmitted commands directly on this
       * thread rather than round-tripping through the worker.  Save and
       * restore the dispatch table, since unmarshalling switches it to the
       * server table. */
      if (glthread->batch && glthread->batch->used) {
         struct _glapi_table *dispatch = _glapi_get_dispatch();
         glthread_unmarshal_batch(ctx, glthread->batch, false);
         _glapi_set_dispatch(dispatch);
      }
   } else {
      /* Worker has queued or in-flight work: submit the current batch too,
       * then sleep until the worker reports the queue drained and idle
       * (it broadcasts work_done before waiting for new work). */
      _mesa_glthread_flush_batch_locked(ctx);
      while (glthread->batch_queue || glthread->busy)
         pthread_cond_wait(&glthread->work_done, &glthread->mutex);
   }

   pthread_mutex_unlock(&glthread->mutex);
}
287
288 #endif