mesa: Create pointers for multithread marshalling dispatch table.
[mesa.git] src/mesa/main/glthread.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa. To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/marshal.h"
#include "main/marshal_generated.h"

#ifdef HAVE_PTHREAD

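/* Allocate a fresh, empty command batch for the application thread to
 * marshal GL calls into.
 */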
static void
glthread_allocate_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   /* TODO: handle memory allocation failure. */
   glthread->batch = calloc(1, sizeof(*glthread->batch));
   if (!glthread->batch)
      return;
   glthread->batch->buffer = malloc(MARSHAL_MAX_CMD_SIZE);
}

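/* Consume a batch: switch this thread to the real (server) dispatch table
 * and release the batch's storage. Note that replay of the individual
 * marshalled commands is not hooked up here yet.
 */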
static void
glthread_unmarshal_batch(struct gl_context *ctx, struct glthread_batch *batch)
{
   _glapi_set_dispatch(ctx->CurrentServerDispatch);

   free(batch->buffer);
   free(batch);
}

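/* Main loop of the worker thread: pull batches off the queue and execute
 * them, broadcasting work_done whenever the queue is empty so that
 * _mesa_glthread_finish() can wake up.
 */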
static void *
glthread_worker(void *data)
{
   struct gl_context *ctx = data;
   struct glthread_state *glthread = ctx->GLThread;

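   /* Bind the context to this thread: tell the driver it will be used from
    * a background thread and make it current for GL dispatch.
    */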
   ctx->Driver.SetBackgroundContext(ctx);
   _glapi_set_context(ctx);

   pthread_mutex_lock(&glthread->mutex);

   while (true) {
      struct glthread_batch *batch;

      /* Block (dropping the lock) until new work arrives for us. */
      while (!glthread->batch_queue && !glthread->shutdown) {
         pthread_cond_broadcast(&glthread->work_done);
         pthread_cond_wait(&glthread->new_work, &glthread->mutex);
      }

      batch = glthread->batch_queue;

      if (glthread->shutdown && !batch) {
         pthread_cond_broadcast(&glthread->work_done);
         pthread_mutex_unlock(&glthread->mutex);
         return NULL;
      }
      glthread->batch_queue = batch->next;
      if (glthread->batch_queue_tail == &batch->next)
         glthread->batch_queue_tail = &glthread->batch_queue;

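      /* Mark ourselves busy and drop the lock while executing the batch, so
       * the application thread can keep queueing new work in the meantime.
       */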
      glthread->busy = true;
      pthread_mutex_unlock(&glthread->mutex);

      glthread_unmarshal_batch(ctx, batch);

      pthread_mutex_lock(&glthread->mutex);
      glthread->busy = false;
   }

   /* UNREACHED */
   return NULL;
}

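/* Set up glthread for a context: allocate the state, initialize the
 * synchronization primitives, allocate the first empty batch, and spawn the
 * worker thread.
 */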
void
_mesa_glthread_init(struct gl_context *ctx)
{
   struct glthread_state *glthread = calloc(1, sizeof(*glthread));

   if (!glthread)
      return;

   pthread_mutex_init(&glthread->mutex, NULL);
   pthread_cond_init(&glthread->new_work, NULL);
   pthread_cond_init(&glthread->work_done, NULL);

   glthread->batch_queue_tail = &glthread->batch_queue;
   ctx->GLThread = glthread;

   glthread_allocate_batch(ctx);

   pthread_create(&glthread->thread, NULL, glthread_worker, ctx);
}

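/* Tear down glthread for a context: flush any pending work, tell the worker
 * thread to exit and wait for it, free the remaining state, and restore the
 * non-marshalling dispatch table for the application thread.
 */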
void
_mesa_glthread_destroy(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   if (!glthread)
      return;

   _mesa_glthread_flush_batch(ctx);

   pthread_mutex_lock(&glthread->mutex);
   glthread->shutdown = true;
   pthread_cond_broadcast(&glthread->new_work);
   pthread_mutex_unlock(&glthread->mutex);

   /* Since this waits for the thread to exit, it means that all queued work
    * will have been completed.
    */
   pthread_join(glthread->thread, NULL);

   pthread_cond_destroy(&glthread->new_work);
   pthread_cond_destroy(&glthread->work_done);
   pthread_mutex_destroy(&glthread->mutex);

   /* Due to the join above, there should be one empty batch allocated at this
    * point, and no batches queued.
    */
   assert(!glthread->batch->used);
   assert(!glthread->batch->next);
   free(glthread->batch->buffer);
   free(glthread->batch);
   assert(!glthread->batch_queue);

   free(glthread);
   ctx->GLThread = NULL;

   /* Remove ourselves from the dispatch table. */
   ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
   _glapi_set_dispatch(ctx->CurrentClientDispatch);
}

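/* Hand the current batch (if it contains any commands) off to the worker
 * thread and immediately start a new one for the application thread to fill.
 */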
void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;
   struct glthread_batch *batch;

   if (!glthread)
      return;

   batch = glthread->batch;
   if (!batch->used)
      return;

   /* Immediately reallocate a new batch, since the next marshalled call would
    * just do it.
    */
   glthread_allocate_batch(ctx);

   /* Debug: execute the batch immediately from this thread.
    *
    * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
    * need to restore it when it returns.
    */
   if (false) {
      glthread_unmarshal_batch(ctx, batch);
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
      return;
   }

   pthread_mutex_lock(&glthread->mutex);
   *glthread->batch_queue_tail = batch;
   glthread->batch_queue_tail = &batch->next;
   pthread_cond_broadcast(&glthread->new_work);
   pthread_mutex_unlock(&glthread->mutex);
}

/**
 * Waits until all pending batches have been unmarshalled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   if (!glthread)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * dri interface entrypoints), in which case we don't need to actually
    * synchronize against ourselves.
    */
   if (pthread_self() == glthread->thread)
      return;

   _mesa_glthread_flush_batch(ctx);

   pthread_mutex_lock(&glthread->mutex);

   while (glthread->batch_queue || glthread->busy)
      pthread_cond_wait(&glthread->work_done, &glthread->mutex);

   pthread_mutex_unlock(&glthread->mutex);
}

#endif /* HAVE_PTHREAD */