mesa/glthread: remove HAVE_PTHREAD guards
[mesa.git] / src / mesa / main / glthread.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa. To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/marshal.h"
#include "main/marshal_generated.h"
#include "util/u_thread.h"

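/* For orientation: the "shim layer" described above is the generated
 * marshalling dispatch table.  A marshalled GL entry point follows roughly
 * the pattern sketched below.  This is an illustrative sketch only; the
 * entry point, command struct, command id and helper names are assumptions
 * made for the sake of the example, not definitions from this file:
 *
 *    static void GLAPIENTRY
 *    _mesa_marshal_Flush(void)
 *    {
 *       GET_CURRENT_CONTEXT(ctx);
 *
 *       // Reserve space for the command in the current batch; the marshal
 *       // layer flushes the batch to the worker thread when it fills up.
 *       (void) _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_Flush,
 *                                              sizeof(struct marshal_cmd_Flush));
 *    }
 *
 * The functions in this file then only deal with whole batches: allocating
 * them, queuing them for the worker thread, and waiting for them to drain.
 */
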
static void
glthread_allocate_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   /* TODO: handle memory allocation failure. */
   glthread->batch = malloc(sizeof(*glthread->batch));
   if (!glthread->batch)
      return;
   memset(glthread->batch, 0, offsetof(struct glthread_batch, buffer));
}

static void
glthread_unmarshal_batch(struct gl_context *ctx, struct glthread_batch *batch,
                         const bool release_batch)
{
   size_t pos = 0;

   _glapi_set_dispatch(ctx->CurrentServerDispatch);

   while (pos < batch->used)
      pos += _mesa_unmarshal_dispatch_cmd(ctx, &batch->buffer[pos]);

   assert(pos == batch->used);

   if (release_batch)
      free(batch);
   else
      batch->used = 0;
}

static void *
glthread_worker(void *data)
{
   struct gl_context *ctx = data;
   struct glthread_state *glthread = ctx->GLThread;

   ctx->Driver.SetBackgroundContext(ctx);
   _glapi_set_context(ctx);

   u_thread_setname("mesa_glthread");

   pthread_mutex_lock(&glthread->mutex);

   while (true) {
      struct glthread_batch *batch;

      /* Block (dropping the lock) until new work arrives for us. */
      while (!glthread->batch_queue && !glthread->shutdown) {
         pthread_cond_broadcast(&glthread->work_done);
         pthread_cond_wait(&glthread->new_work, &glthread->mutex);
      }

      batch = glthread->batch_queue;

      if (glthread->shutdown && !batch) {
         pthread_cond_broadcast(&glthread->work_done);
         pthread_mutex_unlock(&glthread->mutex);
         return NULL;
      }
      glthread->batch_queue = batch->next;
      if (glthread->batch_queue_tail == &batch->next)
         glthread->batch_queue_tail = &glthread->batch_queue;

      glthread->busy = true;
      pthread_mutex_unlock(&glthread->mutex);

      glthread_unmarshal_batch(ctx, batch, true);

      pthread_mutex_lock(&glthread->mutex);
      glthread->busy = false;
   }

   /* UNREACHED */
   return NULL;
}

void
_mesa_glthread_init(struct gl_context *ctx)
{
   struct glthread_state *glthread = calloc(1, sizeof(*glthread));

   if (!glthread)
      return;

   ctx->MarshalExec = _mesa_create_marshal_table(ctx);
   if (!ctx->MarshalExec) {
      free(glthread);
      return;
   }

   ctx->CurrentClientDispatch = ctx->MarshalExec;

   pthread_mutex_init(&glthread->mutex, NULL);
   pthread_cond_init(&glthread->new_work, NULL);
   pthread_cond_init(&glthread->work_done, NULL);

   glthread->batch_queue_tail = &glthread->batch_queue;
   ctx->GLThread = glthread;

   glthread_allocate_batch(ctx);

   pthread_create(&glthread->thread, NULL, glthread_worker, ctx);
}

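/* Note on callers (an assumption about the surrounding code, not something
 * visible in this file): _mesa_glthread_init() is expected to be invoked by
 * the driver or state tracker only when the user opts in to glthread, e.g.:
 *
 *    if (glthread_enabled)            // hypothetical driconf/env switch
 *       _mesa_glthread_init(ctx);     // quietly a no-op on failure
 *
 * After a successful call, ctx->CurrentClientDispatch points at the
 * marshalling table, so once that table is installed as the active dispatch,
 * GL calls are logged into batches rather than executed directly.
 */
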
void
_mesa_glthread_destroy(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   if (!glthread)
      return;

   _mesa_glthread_flush_batch(ctx);

   pthread_mutex_lock(&glthread->mutex);
   glthread->shutdown = true;
   pthread_cond_broadcast(&glthread->new_work);
   pthread_mutex_unlock(&glthread->mutex);

   /* Since this waits for the thread to exit, it means that all queued work
    * will have been completed.
    */
   pthread_join(glthread->thread, NULL);

   pthread_cond_destroy(&glthread->new_work);
   pthread_cond_destroy(&glthread->work_done);
   pthread_mutex_destroy(&glthread->mutex);

   /* Due to the join above, there should be one empty batch allocated at this
    * point, and no batches queued.
    */
   assert(!glthread->batch->used);
   assert(!glthread->batch->next);
   free(glthread->batch);
   assert(!glthread->batch_queue);

   free(glthread);
   ctx->GLThread = NULL;

   _mesa_glthread_restore_dispatch(ctx);
}

void
_mesa_glthread_restore_dispatch(struct gl_context *ctx)
{
   /* Remove ourselves from the dispatch table, unless another context/thread
    * has already installed a new dispatch table.
    *
    * Typically glXMakeCurrent will bind a new context (installing a new
    * table) and the old context might then be deleted.
    */
   if (_glapi_get_dispatch() == ctx->MarshalExec) {
      ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   }
}

static void
_mesa_glthread_flush_batch_locked(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;
   struct glthread_batch *batch = glthread->batch;

   if (!batch->used)
      return;

   /* Immediately reallocate a new batch, since the next marshalled call would
    * just do it.
    */
   glthread_allocate_batch(ctx);

   /* Debug: execute the batch immediately from this thread.
    *
    * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
    * need to restore it when it returns.
    */
   if (false) {
      glthread_unmarshal_batch(ctx, batch, true);
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
      return;
   }

   *glthread->batch_queue_tail = batch;
   glthread->batch_queue_tail = &batch->next;
   pthread_cond_broadcast(&glthread->new_work);
}

void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;
   struct glthread_batch *batch;

   if (!glthread)
      return;

   batch = glthread->batch;
   if (!batch->used)
      return;

   pthread_mutex_lock(&glthread->mutex);
   _mesa_glthread_flush_batch_locked(ctx);
   pthread_mutex_unlock(&glthread->mutex);
}

/**
 * Waits until all pending batches have been unmarshaled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   if (!glthread)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * dri interface entrypoints), in which case we don't need to actually
    * synchronize against ourselves.
    */
   if (pthread_equal(pthread_self(), glthread->thread))
      return;

   pthread_mutex_lock(&glthread->mutex);

   if (!(glthread->batch_queue || glthread->busy)) {
      if (glthread->batch && glthread->batch->used) {
         struct _glapi_table *dispatch = _glapi_get_dispatch();
         glthread_unmarshal_batch(ctx, glthread->batch, false);
         _glapi_set_dispatch(dispatch);
      }
   } else {
      _mesa_glthread_flush_batch_locked(ctx);
      while (glthread->batch_queue || glthread->busy)
         pthread_cond_wait(&glthread->work_done, &glthread->mutex);
   }

   pthread_mutex_unlock(&glthread->mutex);
}
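
/* Usage sketch for _mesa_glthread_finish() (illustration only, not code from
 * this file): marshalling code that cannot queue a call asynchronously, for
 * example a call that returns data to the application, can synchronize first
 * and then go straight through the real dispatch table.  The entry point and
 * the CALL_GetIntegerv() macro below are assumed names used for illustration:
 *
 *    static void GLAPIENTRY
 *    _mesa_marshal_GetIntegerv(GLenum pname, GLint *params)
 *    {
 *       GET_CURRENT_CONTEXT(ctx);
 *
 *       // Drain all queued batches so the reply reflects every prior call.
 *       _mesa_glthread_finish(ctx);
 *       CALL_GetIntegerv(ctx->CurrentServerDispatch, (pname, params));
 *    }
 */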