06115b916db361a53347b672c9ae1d93601dd4d3
[mesa.git] / src / mesa / main / glthread.c
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file glthread.c
25 *
26 * Support functions for the glthread feature of Mesa.
27 *
28 * In multicore systems, many applications end up CPU-bound with about half
29 * their time spent inside their rendering thread and half inside Mesa. To
30 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
31 * quickly logs the GL commands to a buffer to be processed by a worker
32 * thread.
33 */
34
35 #include "main/mtypes.h"
36 #include "main/glthread.h"
37 #include "main/marshal.h"
38 #include "main/marshal_generated.h"
39 #include "util/u_thread.h"
40
41 #ifdef HAVE_PTHREAD
42
43 static void
44 glthread_allocate_batch(struct gl_context *ctx)
45 {
46 struct glthread_state *glthread = ctx->GLThread;
47
48 /* TODO: handle memory allocation failure. */
49 glthread->batch = malloc(sizeof(*glthread->batch));
50 if (!glthread->batch)
51 return;
52 memset(glthread->batch, 0, offsetof(struct glthread_batch, buffer));
53 }
54
55 static void
56 glthread_unmarshal_batch(struct gl_context *ctx, struct glthread_batch *batch)
57 {
58 size_t pos = 0;
59
60 _glapi_set_dispatch(ctx->CurrentServerDispatch);
61
62 while (pos < batch->used)
63 pos += _mesa_unmarshal_dispatch_cmd(ctx, &batch->buffer[pos]);
64
65 assert(pos == batch->used);
66
67 free(batch);
68 }
69
/* Worker thread entry point: repeatedly pops batches off the queue and
 * executes them against the driver.
 *
 * Protocol (all queue/flag state is protected by glthread->mutex):
 *   - new_work is signalled by producers when a batch is queued or at
 *     shutdown; work_done is broadcast here whenever the worker goes idle,
 *     which _mesa_glthread_finish() waits on.
 *   - busy is true while a batch is being unmarshalled outside the lock, so
 *     waiters can tell "queue empty" apart from "still executing".
 */
static void *
glthread_worker(void *data)
{
   struct gl_context *ctx = data;
   struct glthread_state *glthread = ctx->GLThread;

   /* Bind this context to the worker thread before executing anything. */
   ctx->Driver.SetBackgroundContext(ctx);
   _glapi_set_context(ctx);

   u_thread_setname("mesa_glthread");

   pthread_mutex_lock(&glthread->mutex);

   while (true) {
      struct glthread_batch *batch;

      /* Block (dropping the lock) until new work arrives for us. */
      while (!glthread->batch_queue && !glthread->shutdown) {
         pthread_cond_broadcast(&glthread->work_done);
         pthread_cond_wait(&glthread->new_work, &glthread->mutex);
      }

      batch = glthread->batch_queue;

      /* Shutdown only once the queue has fully drained, so queued work is
       * never dropped; wake any thread blocked in _mesa_glthread_finish().
       */
      if (glthread->shutdown && !batch) {
         pthread_cond_broadcast(&glthread->work_done);
         pthread_mutex_unlock(&glthread->mutex);
         return NULL;
      }
      /* Unlink the batch; if it was the last one, reset the tail pointer
       * back to the list head.
       */
      glthread->batch_queue = batch->next;
      if (glthread->batch_queue_tail == &batch->next)
         glthread->batch_queue_tail = &glthread->batch_queue;

      /* Execute outside the lock so producers can keep queueing. */
      glthread->busy = true;
      pthread_mutex_unlock(&glthread->mutex);

      glthread_unmarshal_batch(ctx, batch);

      pthread_mutex_lock(&glthread->mutex);
      glthread->busy = false;
   }

   /* UNREACHED */
   return NULL;
}
115
116 void
117 _mesa_glthread_init(struct gl_context *ctx)
118 {
119 struct glthread_state *glthread = calloc(1, sizeof(*glthread));
120
121 if (!glthread)
122 return;
123
124 ctx->MarshalExec = _mesa_create_marshal_table(ctx);
125 if (!ctx->MarshalExec) {
126 free(glthread);
127 return;
128 }
129
130 ctx->CurrentClientDispatch = ctx->MarshalExec;
131
132 pthread_mutex_init(&glthread->mutex, NULL);
133 pthread_cond_init(&glthread->new_work, NULL);
134 pthread_cond_init(&glthread->work_done, NULL);
135
136 glthread->batch_queue_tail = &glthread->batch_queue;
137 ctx->GLThread = glthread;
138
139 glthread_allocate_batch(ctx);
140
141 pthread_create(&glthread->thread, NULL, glthread_worker, ctx);
142 }
143
/* Tear down glthread for a context: drain all queued work, stop the worker
 * thread, and free all glthread state. Safe to call when glthread was never
 * enabled (ctx->GLThread == NULL).
 *
 * Ordering matters: flush first so the current batch is queued, then set
 * the shutdown flag under the lock and wake the worker, then join -- the
 * worker only exits once the queue is empty, so the join guarantees every
 * queued command has executed.
 */
void
_mesa_glthread_destroy(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   if (!glthread)
      return;

   _mesa_glthread_flush_batch(ctx);

   pthread_mutex_lock(&glthread->mutex);
   glthread->shutdown = true;
   pthread_cond_broadcast(&glthread->new_work);
   pthread_mutex_unlock(&glthread->mutex);

   /* Since this waits for the thread to exit, it means that all queued work
    * will have been completed.
    */
   pthread_join(glthread->thread, NULL);

   pthread_cond_destroy(&glthread->new_work);
   pthread_cond_destroy(&glthread->work_done);
   pthread_mutex_destroy(&glthread->mutex);

   /* Due to the join above, there should be one empty batch allocated at this
    * point, and no batches queued.
    */
   assert(!glthread->batch->used);
   assert(!glthread->batch->next);
   free(glthread->batch);
   assert(!glthread->batch_queue);

   free(glthread);
   ctx->GLThread = NULL;

   _mesa_glthread_restore_dispatch(ctx);
}
181
182 void
183 _mesa_glthread_restore_dispatch(struct gl_context *ctx)
184 {
185 /* Remove ourselves from the dispatch table except if another ctx/thread
186 * already installed a new dispatch table.
187 *
188 * Typically glxMakeCurrent will bind a new context (install new table) then
189 * old context might be deleted.
190 */
191 if (_glapi_get_dispatch() == ctx->MarshalExec) {
192 ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
193 _glapi_set_dispatch(ctx->CurrentClientDispatch);
194 }
195 }
196
197 void
198 _mesa_glthread_flush_batch(struct gl_context *ctx)
199 {
200 struct glthread_state *glthread = ctx->GLThread;
201 struct glthread_batch *batch;
202
203 if (!glthread)
204 return;
205
206 batch = glthread->batch;
207 if (!batch->used)
208 return;
209
210 /* Immediately reallocate a new batch, since the next marshalled call would
211 * just do it.
212 */
213 glthread_allocate_batch(ctx);
214
215 /* Debug: execute the batch immediately from this thread.
216 *
217 * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
218 * need to restore it when it returns.
219 */
220 if (false) {
221 glthread_unmarshal_batch(ctx, batch);
222 _glapi_set_dispatch(ctx->CurrentClientDispatch);
223 return;
224 }
225
226 pthread_mutex_lock(&glthread->mutex);
227 *glthread->batch_queue_tail = batch;
228 glthread->batch_queue_tail = &batch->next;
229 pthread_cond_broadcast(&glthread->new_work);
230 pthread_mutex_unlock(&glthread->mutex);
231 }
232
233 /**
234 * Waits for all pending batches have been unmarshaled.
235 *
236 * This can be used by the main thread to synchronize access to the context,
237 * since the worker thread will be idle after this.
238 */
239 void
240 _mesa_glthread_finish(struct gl_context *ctx)
241 {
242 struct glthread_state *glthread = ctx->GLThread;
243
244 if (!glthread)
245 return;
246
247 /* If this is called from the worker thread, then we've hit a path that
248 * might be called from either the main thread or the worker (such as some
249 * dri interface entrypoints), in which case we don't need to actually
250 * synchronize against ourself.
251 */
252 if (pthread_self() == glthread->thread)
253 return;
254
255 _mesa_glthread_flush_batch(ctx);
256
257 pthread_mutex_lock(&glthread->mutex);
258
259 while (glthread->batch_queue || glthread->busy)
260 pthread_cond_wait(&glthread->work_done, &glthread->mutex);
261
262 pthread_mutex_unlock(&glthread->mutex);
263 }
264
265 #endif