/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa. To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/marshal.h"
#include "util/u_atomic.h"
#include "util/u_thread.h"

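/* For illustration only: a minimal, hypothetical sketch of how an
 * asynchronous GL entry point in ctx->MarshalExec could log a call into the
 * current batch, to be replayed later by glthread_unmarshal_batch() below.
 * Apart from marshal_cmd_base, the struct, enum and size names here are
 * assumptions, not Mesa's actual generated marshal code.
 */
#if 0
struct marshal_cmd_Enable
{
   struct marshal_cmd_base cmd_base; /* command id + size header */
   GLenum cap;
};

static void
example_marshal_Enable(GLenum cap)
{
   GET_CURRENT_CONTEXT(ctx);
   struct glthread_state *glthread = ctx->GLThread;
   const int size = sizeof(struct marshal_cmd_Enable);
   struct glthread_batch *next;

   /* Submit the current batch if this command wouldn't fit
    * (EXAMPLE_BUFFER_SIZE is a stand-in for the real batch buffer size). */
   if (glthread->batches[glthread->next].used + size > EXAMPLE_BUFFER_SIZE)
      _mesa_glthread_flush_batch(ctx);

   /* Append the command record to the batch buffer. */
   next = &glthread->batches[glthread->next];
   struct marshal_cmd_Enable *cmd =
      (struct marshal_cmd_Enable *)&next->buffer[next->used];
   cmd->cmd_base.cmd_id = EXAMPLE_DISPATCH_CMD_Enable; /* hypothetical id */
   cmd->cmd_base.cmd_size = size;
   cmd->cap = cap;
   next->used += size;
}
#endif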

static void
glthread_unmarshal_batch(void *job, int thread_index)
{
   struct glthread_batch *batch = (struct glthread_batch*)job;
   struct gl_context *ctx = batch->ctx;
   int pos = 0;
   int used = batch->used;
   uint8_t *buffer = batch->buffer;

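   /* Execute the replayed calls through the real driver dispatch table, not
    * the marshaling one, by switching this thread's dispatch first. */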
   _glapi_set_dispatch(ctx->CurrentServerDispatch);

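   /* The batch buffer holds variable-size command records back to back;
    * each starts with a marshal_cmd_base header giving the command id and
    * the record's size, which is used to step to the next record. */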
   while (pos < used) {
      const struct marshal_cmd_base *cmd =
         (const struct marshal_cmd_base *)&buffer[pos];

      _mesa_unmarshal_dispatch[cmd->cmd_id](ctx, cmd);
      pos += cmd->cmd_size;
   }

   assert(pos == used);
   batch->used = 0;
}

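/* Runs once in the worker thread right after it starts: lets the driver set
 * this context up as a background context and makes it current for glapi
 * dispatch in that thread, so unmarshaled calls execute against it. */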
static void
glthread_thread_initialization(void *job, int thread_index)
{
   struct gl_context *ctx = (struct gl_context*)job;

   ctx->Driver.SetBackgroundContext(ctx, &ctx->GLThread->stats);
   _glapi_set_context(ctx);
}

void
_mesa_glthread_init(struct gl_context *ctx)
{
   struct glthread_state *glthread = calloc(1, sizeof(*glthread));

   if (!glthread)
      return;

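   /* Queue depth is smaller than the number of batches, presumably so the
    * application thread always has free batches to record into while the
    * queued ones wait for the single worker thread. */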
   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
                        1, 0)) {
      free(glthread);
      return;
   }

   ctx->MarshalExec = _mesa_create_marshal_table(ctx);
   if (!ctx->MarshalExec) {
      util_queue_destroy(&glthread->queue);
      free(glthread);
      return;
   }

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++) {
      glthread->batches[i].ctx = ctx;
      util_queue_fence_init(&glthread->batches[i].fence);
   }

   glthread->stats.queue = &glthread->queue;
   ctx->CurrentClientDispatch = ctx->MarshalExec;
   ctx->GLThread = glthread;

   /* Execute the thread initialization function in the thread. */
   struct util_queue_fence fence;
   util_queue_fence_init(&fence);
   util_queue_add_job(&glthread->queue, ctx, &fence,
                      glthread_thread_initialization, NULL, 0);
   util_queue_fence_wait(&fence);
   util_queue_fence_destroy(&fence);
}

void
_mesa_glthread_destroy(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   if (!glthread)
      return;

   _mesa_glthread_finish(ctx);
   util_queue_destroy(&glthread->queue);

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++)
      util_queue_fence_destroy(&glthread->batches[i].fence);

   free(glthread);
   ctx->GLThread = NULL;

   _mesa_glthread_restore_dispatch(ctx, "destroy");
}

void
_mesa_glthread_restore_dispatch(struct gl_context *ctx, const char *func)
{
   /* Remove ourselves from the dispatch table unless another ctx/thread has
    * already installed a new dispatch table.
    *
    * Typically glXMakeCurrent will bind a new context (installing its
    * table), and the old context might be deleted afterwards.
    */
   if (_glapi_get_dispatch() == ctx->MarshalExec) {
      ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
#if 0
      printf("glthread disabled: %s\n", func);
#endif
   }
}

void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;
   if (!glthread)
      return;

   struct glthread_batch *next = &glthread->batches[glthread->next];
   if (!next->used)
      return;

   /* Debug: execute the batch immediately from this thread.
    *
    * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
    * need to restore it when it returns.
    */
   if (false) {
      glthread_unmarshal_batch(next, 0);
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
      return;
   }

   p_atomic_add(&glthread->stats.num_offloaded_items, next->used);

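   /* Hand the batch to the worker thread and advance to the next slot in the
    * ring of batches; "last" remembers the most recently submitted batch so
    * _mesa_glthread_finish() knows which fence to wait on. */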
   util_queue_add_job(&glthread->queue, next, &next->fence,
                      glthread_unmarshal_batch, NULL, 0);
   glthread->last = glthread->next;
   glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
}

/**
 * Waits until all pending batches have been unmarshaled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;
   if (!glthread)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * dri interface entrypoints), in which case we don't need to actually
    * synchronize against ourselves.
    */
   if (u_thread_is_self(glthread->queue.threads[0]))
      return;

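   /* "last" is the most recently submitted batch; since a single worker
    * executes batches in order, waiting on its fence drains the queue.
    * "next" is the batch still being filled; it was never submitted, so
    * execute it directly in this thread. */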
   struct glthread_batch *last = &glthread->batches[glthread->last];
   struct glthread_batch *next = &glthread->batches[glthread->next];
   bool synced = false;

   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   if (next->used) {
      p_atomic_add(&glthread->stats.num_direct_items, next->used);

      /* Since glthread_unmarshal_batch changes the dispatch to direct,
       * restore it after it's done.
       */
      struct _glapi_table *dispatch = _glapi_get_dispatch();
      glthread_unmarshal_batch(next, 0);
      _glapi_set_dispatch(dispatch);

      /* It's not a sync because we don't enqueue partial batches, but
       * it would be a sync if we did. So count it anyway.
       */
      synced = true;
   }

   if (synced)
      p_atomic_inc(&glthread->stats.num_syncs);
}
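
/* For illustration only: a hedged sketch of how a GL call that must return a
 * value immediately (and therefore cannot be queued) would typically use
 * _mesa_glthread_finish(): synchronize, then call the real entry point from
 * the server dispatch table.  The function name below is hypothetical, not
 * Mesa's generated marshal code; CALL_GetError comes from "main/dispatch.h".
 */
#if 0
static GLenum
example_marshal_GetError(void)
{
   GET_CURRENT_CONTEXT(ctx);

   /* Drain all queued batches and execute the partially filled one, so the
    * error state is up to date before reading it. */
   _mesa_glthread_finish(ctx);

   /* Call the real (non-marshaling) implementation directly. */
   return CALL_GetError(ctx->CurrentServerDispatch, ());
}
#endif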