src/mesa/main/glthread.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa. To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/glthread_marshal.h"
#include "main/hash.h"
#include "util/u_atomic.h"
#include "util/u_thread.h"

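/**
 * Worker-thread job: walk one batch's command buffer, call the unmarshal
 * function for each logged command, then mark the batch empty for reuse.
 */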
static void
glthread_unmarshal_batch(void *job, int thread_index)
{
   struct glthread_batch *batch = (struct glthread_batch*)job;
   struct gl_context *ctx = batch->ctx;
   int pos = 0;
   int used = batch->used;
   uint8_t *buffer = batch->buffer;

   _glapi_set_dispatch(ctx->CurrentServerDispatch);

   while (pos < used) {
      const struct marshal_cmd_base *cmd =
         (const struct marshal_cmd_base *)&buffer[pos];

      _mesa_unmarshal_dispatch[cmd->cmd_id](ctx, cmd);
      pos += cmd->cmd_size;
   }

   assert(pos == used);
   batch->used = 0;
}
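
/*
 * For reference, a minimal sketch of the producer side that fills these
 * batches from the application thread.  The real helper lives in
 * glthread_marshal.h and is called by the generated marshal wrappers; the
 * exact field types, alignment handling, and MARSHAL_MAX_CMD_SIZE limit
 * below are simplified assumptions, not the authoritative implementation:
 *
 *    static inline struct marshal_cmd_base *
 *    allocate_command_sketch(struct gl_context *ctx, uint16_t cmd_id,
 *                            unsigned size)
 *    {
 *       struct glthread_state *glthread = &ctx->GLThread;
 *       struct glthread_batch *next = glthread->next_batch;
 *       struct marshal_cmd_base *cmd;
 *
 *       // If the command doesn't fit, hand the batch to the worker first.
 *       if (next->used + size > MARSHAL_MAX_CMD_SIZE) {
 *          _mesa_glthread_flush_batch(ctx);
 *          next = glthread->next_batch;
 *       }
 *
 *       // Append the command header; its payload follows in the buffer.
 *       cmd = (struct marshal_cmd_base *)&next->buffer[next->used];
 *       cmd->cmd_id = cmd_id;
 *       cmd->cmd_size = size;
 *       next->used += size;
 *       return cmd;
 *    }
 *
 * glthread_unmarshal_batch() above is the consumer of exactly this layout:
 * it walks the buffer header by header using cmd_id and cmd_size.
 */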

/**
 * Executed once on the worker thread right after it starts: bind the
 * driver's background context and make it current for glapi.
 */
static void
glthread_thread_initialization(void *job, int thread_index)
{
   struct gl_context *ctx = (struct gl_context*)job;

   ctx->Driver.SetBackgroundContext(ctx, &ctx->GLThread.stats);
   _glapi_set_context(ctx);
}

void
_mesa_glthread_init(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   assert(!glthread->enabled);

   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
                        1, 0)) {
      return;
   }

   glthread->VAOs = _mesa_NewHashTable();
   if (!glthread->VAOs) {
      util_queue_destroy(&glthread->queue);
      return;
   }

   _mesa_glthread_reset_vao(&glthread->DefaultVAO);
   glthread->CurrentVAO = &glthread->DefaultVAO;

   ctx->MarshalExec = _mesa_create_marshal_table(ctx);
   if (!ctx->MarshalExec) {
      _mesa_DeleteHashTable(glthread->VAOs);
      util_queue_destroy(&glthread->queue);
      return;
   }

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++) {
      glthread->batches[i].ctx = ctx;
      util_queue_fence_init(&glthread->batches[i].fence);
   }
   glthread->next_batch = &glthread->batches[glthread->next];

   glthread->enabled = true;
   glthread->stats.queue = &glthread->queue;

   glthread->SupportsBufferUploads =
      ctx->Const.BufferCreateMapUnsynchronizedThreadSafe &&
      ctx->Const.AllowMappedBuffersDuringExecution;

   /* If the draw start index is non-zero, glthread can upload to offset 0,
    * which means the attrib offset has to be -(first * stride).
    * So require signed vertex buffer offsets.
    */
   glthread->SupportsNonVBOUploads = glthread->SupportsBufferUploads &&
                                     ctx->Const.VertexBufferOffsetIsInt32;

   ctx->CurrentClientDispatch = ctx->MarshalExec;

   /* Execute the thread initialization function in the thread. */
   struct util_queue_fence fence;
   util_queue_fence_init(&fence);
   util_queue_add_job(&glthread->queue, ctx, &fence,
                      glthread_thread_initialization, NULL, 0);
   util_queue_fence_wait(&fence);
   util_queue_fence_destroy(&fence);
}
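
/*
 * Usage sketch (illustrative; the exact call sites and option plumbing
 * differ per driver): a driver opts into glthread after creating a context,
 * typically gated on the "mesa_glthread" driconf option, and tears it down
 * before the context is destroyed.  The driver must implement
 * ctx->Driver.SetBackgroundContext, since the worker thread binds its own
 * driver context in glthread_thread_initialization():
 *
 *    if (glthread_option_enabled)   // hypothetical option check
 *       _mesa_glthread_init(ctx);
 *    ...
 *    _mesa_glthread_destroy(ctx);   // on context teardown
 */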

static void
free_vao(GLuint key, void *data, void *userData)
{
   free(data);
}

void
_mesa_glthread_destroy(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!glthread->enabled)
      return;

   _mesa_glthread_finish(ctx);
   util_queue_destroy(&glthread->queue);

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++)
      util_queue_fence_destroy(&glthread->batches[i].fence);

   _mesa_HashDeleteAll(glthread->VAOs, free_vao, NULL);
   _mesa_DeleteHashTable(glthread->VAOs);

   ctx->GLThread.enabled = false;

   _mesa_glthread_restore_dispatch(ctx, "destroy");
}

void
_mesa_glthread_restore_dispatch(struct gl_context *ctx, const char *func)
{
   /* Remove ourselves from the dispatch table, unless another context or
    * thread has already installed a new dispatch table.
    *
    * Typically glXMakeCurrent binds a new context (installing a new table)
    * and the old context may then be deleted.
    */
   if (_glapi_get_dispatch() == ctx->MarshalExec) {
      ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
#if 0
      printf("glthread disabled: %s\n", func);
#endif
   }
}

void
_mesa_glthread_disable(struct gl_context *ctx, const char *func)
{
   _mesa_glthread_finish_before(ctx, func);
   _mesa_glthread_restore_dispatch(ctx, func);
}

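/**
 * Hand the current batch off to the worker thread and advance to the next
 * batch.  Does nothing if glthread is disabled or the batch is empty.
 */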
void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   struct glthread_batch *next = glthread->next_batch;
   if (!next->used)
      return;

   /* Debug: execute the batch immediately from this thread.
    *
    * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
    * need to restore it when it returns.
    */
   if (false) {
      glthread_unmarshal_batch(next, 0);
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
      return;
   }

   p_atomic_add(&glthread->stats.num_offloaded_items, next->used);

   util_queue_add_job(&glthread->queue, next, &next->fence,
                      glthread_unmarshal_batch, NULL, 0);
   glthread->last = glthread->next;
   glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
   glthread->next_batch = &glthread->batches[glthread->next];
}

/**
 * Waits until all pending batches have been unmarshaled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * DRI interface entrypoints), in which case we don't need to actually
    * synchronize against ourselves.
    */
   if (u_thread_is_self(glthread->queue.threads[0]))
      return;

   struct glthread_batch *last = &glthread->batches[glthread->last];
   struct glthread_batch *next = glthread->next_batch;
   bool synced = false;

   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   if (next->used) {
      p_atomic_add(&glthread->stats.num_direct_items, next->used);

      /* Since glthread_unmarshal_batch changes the dispatch to direct,
       * restore it after it's done.
       */
      struct _glapi_table *dispatch = _glapi_get_dispatch();
      glthread_unmarshal_batch(next, 0);
      _glapi_set_dispatch(dispatch);

      /* It's not a real sync because we don't enqueue partial batches, but
       * it would be one if we did, so count it anyway.
       */
      synced = true;
   }

   if (synced)
      p_atomic_inc(&glthread->stats.num_syncs);
}

void
_mesa_glthread_finish_before(struct gl_context *ctx, const char *func)
{
   _mesa_glthread_finish(ctx);

   /* Uncomment this if you want to know where glthread syncs. */
   /*printf("fallback to sync: %s\n", func);*/
}
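
/*
 * For context: entrypoints that can't be marshalled asynchronously fall back
 * to synchronous execution through this helper.  A sketch of that pattern,
 * using a hypothetical GetFoo entrypoint (the real fallbacks live in
 * marshal.c and the generated marshal code):
 *
 *    static void GLAPIENTRY
 *    _mesa_marshal_GetFoo(GLenum pname, GLint *params)
 *    {
 *       GET_CURRENT_CONTEXT(ctx);
 *       // Drain the worker so GL state is up to date for the query ...
 *       _mesa_glthread_finish_before(ctx, "GetFoo");
 *       // ... then call the real entrypoint directly.
 *       CALL_GetFoo(ctx->CurrentServerDispatch, (pname, params));
 *    }
 */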