glapi: Mark vertex attrib pointer functions as async.
[mesa.git] / src / mesa / main / glthread.c
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file glthread.c
25 *
26 * Support functions for the glthread feature of Mesa.
27 *
28 * In multicore systems, many applications end up CPU-bound with about half
29 * their time spent inside their rendering thread and half inside Mesa. To
30 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
31 * quickly logs the GL commands to a buffer to be processed by a worker
32 * thread.
33 */
34
35 #include "main/mtypes.h"
36 #include "main/glthread.h"
37 #include "main/marshal.h"
38 #include "main/marshal_generated.h"
39
40 #ifdef HAVE_PTHREAD
41
42 static void
43 glthread_allocate_batch(struct gl_context *ctx)
44 {
45 struct glthread_state *glthread = ctx->GLThread;
46
47 /* TODO: handle memory allocation failure. */
48 glthread->batch = calloc(1, sizeof(*glthread->batch));
49 if (!glthread->batch)
50 return;
51 glthread->batch->buffer = malloc(MARSHAL_MAX_CMD_SIZE);
52 }
53
54 static void
55 glthread_unmarshal_batch(struct gl_context *ctx, struct glthread_batch *batch)
56 {
57 size_t pos = 0;
58
59 _glapi_set_dispatch(ctx->CurrentServerDispatch);
60
61 while (pos < batch->used)
62 pos += _mesa_unmarshal_dispatch_cmd(ctx, &batch->buffer[pos]);
63
64 assert(pos == batch->used);
65
66 free(batch->buffer);
67 free(batch);
68 }
69
/**
 * The worker thread's main loop: pop batches off the shared queue and
 * execute them until _mesa_glthread_destroy() requests shutdown.
 *
 * \param data  the gl_context this worker services (passed by
 *              _mesa_glthread_init() via pthread_create()).
 * \return always NULL; the thread's exit value is unused.
 */
static void *
glthread_worker(void *data)
{
   struct gl_context *ctx = data;
   struct glthread_state *glthread = ctx->GLThread;

   /* Bind the GL context to this thread so the unmarshalled commands run
    * against it.  NOTE(review): assumes the driver implements
    * SetBackgroundContext -- confirm for every driver that enables glthread.
    */
   ctx->Driver.SetBackgroundContext(ctx);
   _glapi_set_context(ctx);

   pthread_mutex_lock(&glthread->mutex);

   while (true) {
      struct glthread_batch *batch;

      /* Block (dropping the lock) until new work arrives for us. */
      while (!glthread->batch_queue && !glthread->shutdown) {
         /* Tell _mesa_glthread_finish(): queue empty and we're idle. */
         pthread_cond_broadcast(&glthread->work_done);
         pthread_cond_wait(&glthread->new_work, &glthread->mutex);
      }

      batch = glthread->batch_queue;

      /* Shut down only after the queue has been fully drained. */
      if (glthread->shutdown && !batch) {
         pthread_cond_broadcast(&glthread->work_done);
         pthread_mutex_unlock(&glthread->mutex);
         return NULL;
      }

      /* Pop the head of the singly-linked queue; if we just removed the
       * last element, reset the tail pointer back to the head pointer.
       */
      glthread->batch_queue = batch->next;
      if (glthread->batch_queue_tail == &batch->next)
         glthread->batch_queue_tail = &glthread->batch_queue;

      /* "busy" covers the window where the queue is empty but we are still
       * executing a batch outside the lock, so _mesa_glthread_finish()
       * doesn't mistake that state for "all work done".
       */
      glthread->busy = true;
      pthread_mutex_unlock(&glthread->mutex);

      glthread_unmarshal_batch(ctx, batch);

      pthread_mutex_lock(&glthread->mutex);
      glthread->busy = false;
   }

   /* UNREACHED */
   return NULL;
}
113
114 void
115 _mesa_glthread_init(struct gl_context *ctx)
116 {
117 struct glthread_state *glthread = calloc(1, sizeof(*glthread));
118
119 if (!glthread)
120 return;
121
122 ctx->MarshalExec = _mesa_create_marshal_table(ctx);
123 if (!ctx->MarshalExec) {
124 free(glthread);
125 return;
126 }
127
128 ctx->CurrentClientDispatch = ctx->MarshalExec;
129
130 pthread_mutex_init(&glthread->mutex, NULL);
131 pthread_cond_init(&glthread->new_work, NULL);
132 pthread_cond_init(&glthread->work_done, NULL);
133
134 glthread->batch_queue_tail = &glthread->batch_queue;
135 ctx->GLThread = glthread;
136
137 glthread_allocate_batch(ctx);
138
139 pthread_create(&glthread->thread, NULL, glthread_worker, ctx);
140 }
141
142 void
143 _mesa_glthread_destroy(struct gl_context *ctx)
144 {
145 struct glthread_state *glthread = ctx->GLThread;
146
147 if (!glthread)
148 return;
149
150 _mesa_glthread_flush_batch(ctx);
151
152 pthread_mutex_lock(&glthread->mutex);
153 glthread->shutdown = true;
154 pthread_cond_broadcast(&glthread->new_work);
155 pthread_mutex_unlock(&glthread->mutex);
156
157 /* Since this waits for the thread to exit, it means that all queued work
158 * will have been completed.
159 */
160 pthread_join(glthread->thread, NULL);
161
162 pthread_cond_destroy(&glthread->new_work);
163 pthread_cond_destroy(&glthread->work_done);
164 pthread_mutex_destroy(&glthread->mutex);
165
166 /* Due to the join above, there should be one empty batch allocated at this
167 * point, and no batches queued.
168 */
169 assert(!glthread->batch->used);
170 assert(!glthread->batch->next);
171 free(glthread->batch);
172 assert(!glthread->batch_queue);
173
174 free(glthread);
175 ctx->GLThread = NULL;
176
177 /* Remove ourselves from the dispatch table. */
178 ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
179 _glapi_set_dispatch(ctx->CurrentClientDispatch);
180 }
181
182 void
183 _mesa_glthread_flush_batch(struct gl_context *ctx)
184 {
185 struct glthread_state *glthread = ctx->GLThread;
186 struct glthread_batch *batch;
187
188 if (!glthread)
189 return;
190
191 batch = glthread->batch;
192 if (!batch->used)
193 return;
194
195 /* Immediately reallocate a new batch, since the next marshalled call would
196 * just do it.
197 */
198 glthread_allocate_batch(ctx);
199
200 /* Debug: execute the batch immediately from this thread.
201 *
202 * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
203 * need to restore it when it returns.
204 */
205 if (false) {
206 glthread_unmarshal_batch(ctx, batch);
207 _glapi_set_dispatch(ctx->CurrentClientDispatch);
208 return;
209 }
210
211 pthread_mutex_lock(&glthread->mutex);
212 *glthread->batch_queue_tail = batch;
213 glthread->batch_queue_tail = &batch->next;
214 pthread_cond_broadcast(&glthread->new_work);
215 pthread_mutex_unlock(&glthread->mutex);
216 }
217
218 /**
219 * Waits for all pending batches have been unmarshaled.
220 *
221 * This can be used by the main thread to synchronize access to the context,
222 * since the worker thread will be idle after this.
223 */
224 void
225 _mesa_glthread_finish(struct gl_context *ctx)
226 {
227 struct glthread_state *glthread = ctx->GLThread;
228
229 if (!glthread)
230 return;
231
232 /* If this is called from the worker thread, then we've hit a path that
233 * might be called from either the main thread or the worker (such as some
234 * dri interface entrypoints), in which case we don't need to actually
235 * synchronize against ourself.
236 */
237 if (pthread_self() == glthread->thread)
238 return;
239
240 _mesa_glthread_flush_batch(ctx);
241
242 pthread_mutex_lock(&glthread->mutex);
243
244 while (glthread->batch_queue || glthread->busy)
245 pthread_cond_wait(&glthread->work_done, &glthread->mutex);
246
247 pthread_mutex_unlock(&glthread->mutex);
248 }
249
250 #endif