2399375a9bc2569bda92aa0bff7cea23502576b7
[mesa.git] / src / glx / indirect_glx.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Soft-
6 * ware"), to deal in the Software without restriction, including without
7 * limitation the rights to use, copy, modify, merge, publish, distribute,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, provided that the above copyright
10 * notice(s) and this permission notice appear in all copies of the Soft-
11 * ware and that both the above copyright notice(s) and this permission
12 * notice appear in supporting documentation.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 * ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY
17 * RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN
18 * THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSE-
19 * QUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
20 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
21 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFOR-
22 * MANCE OF THIS SOFTWARE.
23 *
24 * Except as contained in this notice, the name of a copyright holder shall
25 * not be used in advertising or otherwise to promote the sale, use or
26 * other dealings in this Software without prior written authorization of
27 * the copyright holder.
28 *
29 * Authors:
30 * Kristian Høgsberg (krh@bitplanet.net)
31 */
32
33 #include "glapi.h"
34 #include "glxclient.h"
35
36 extern struct _glapi_table *__glXNewIndirectAPI(void);
37
38 /*
39 ** All indirect rendering contexts will share the same indirect dispatch table.
40 */
41 static struct _glapi_table *IndirectAPI = NULL;
42
static void
indirect_destroy_context(struct glx_context *gc)
{
   /* Tear down an indirect rendering context: client-side vertex array
    * state, the cached GL info strings, the attribute stack, the GLX
    * render command buffer, and finally the context record itself.
    * The server-side context is destroyed elsewhere (protocol request);
    * this only releases client memory. */
   __glXFreeVertexArrayState(gc);

   /* The info strings are only filled in lazily (see the glGetString
    * calls in indirect_bind_context), so each may still be NULL. */
   if (gc->vendor)
      XFree((char *) gc->vendor);
   if (gc->renderer)
      XFree((char *) gc->renderer);
   if (gc->version)
      XFree((char *) gc->version);
   if (gc->extensions)
      XFree((char *) gc->extensions);
   __glFreeAttributeState(gc);
   XFree((char *) gc->buf);
   /* Xfree vs. XFree: both release Xmalloc'd memory; kept as-is. */
   Xfree((char *) gc->client_state_private);
   XFree((char *) gc);
}
61
static Bool
SendMakeCurrentRequest(Display * dpy, CARD8 opcode,
                       GLXContextID gc_id, GLXContextTag gc_tag,
                       GLXDrawable draw, GLXDrawable read,
                       xGLXMakeCurrentReply * reply)
{
   /* Issue the appropriate make-current protocol request and wait for
    * the reply.  Three wire encodings exist:
    *   - GLX 1.2 MakeCurrent          (draw == read)
    *   - GLX 1.3 MakeContextCurrent   (separate draw/read, server >= 1.3)
    *   - SGI MakeCurrentReadSGI       (separate draw/read, older servers)
    * Returns the _XReply status; *reply holds the new context tag on
    * success.  The display is locked for the request/reply round trip. */
   Bool ret;

   LockDisplay(dpy);

   if (draw == read) {
      xGLXMakeCurrentReq *req;

      GetReq(GLXMakeCurrent, req);
      req->reqType = opcode;
      req->glxCode = X_GLXMakeCurrent;
      req->drawable = draw;
      req->context = gc_id;
      req->oldContextTag = gc_tag;
   }
   else {
      /* NOTE(review): the __glXInitialize result is not NULL-checked
       * before dereferencing — presumably callers only reach this with
       * an already-initialized display; confirm. */
      struct glx_display *priv = __glXInitialize(dpy);

      /* If the server can support the GLX 1.3 version, we should
       * prefer that.  Not only that, some servers support GLX 1.3 but
       * not the SGI extension.
       */

      if ((priv->majorVersion > 1) || (priv->minorVersion >= 3)) {
         xGLXMakeContextCurrentReq *req;

         GetReq(GLXMakeContextCurrent, req);
         req->reqType = opcode;
         req->glxCode = X_GLXMakeContextCurrent;
         req->drawable = draw;
         req->readdrawable = read;
         req->context = gc_id;
         req->oldContextTag = gc_tag;
      }
      else {
         /* The SGI request travels as vendor-private data appended to a
          * GLXVendorPrivateWithReply request, hence the GetReqExtra with
          * the size difference between the two request structs. */
         xGLXVendorPrivateWithReplyReq *vpreq;
         xGLXMakeCurrentReadSGIReq *req;

         GetReqExtra(GLXVendorPrivateWithReply,
                     sz_xGLXMakeCurrentReadSGIReq -
                     sz_xGLXVendorPrivateWithReplyReq, vpreq);
         req = (xGLXMakeCurrentReadSGIReq *) vpreq;
         req->reqType = opcode;
         req->glxCode = X_GLXVendorPrivateWithReply;
         req->vendorCode = X_GLXvop_MakeCurrentReadSGI;
         req->drawable = draw;
         req->readable = read;
         req->context = gc_id;
         req->oldContextTag = gc_tag;
      }
   }

   ret = _XReply(dpy, (xReply *) reply, 0, False);

   UnlockDisplay(dpy);
   SyncHandle();

   return ret;
}
126
127 static int
128 indirect_bind_context(struct glx_context *gc, struct glx_context *old,
129 GLXDrawable draw, GLXDrawable read)
130 {
131 xGLXMakeCurrentReply reply;
132 GLXContextTag tag;
133 __GLXattribute *state;
134 Display *dpy = gc->psc->dpy;
135 int opcode = __glXSetupForCommand(dpy);
136
137 if (old != &dummyContext && !old->isDirect && old->psc->dpy == dpy) {
138 tag = old->currentContextTag;
139 old->currentContextTag = 0;
140 } else {
141 tag = 0;
142 }
143
144 SendMakeCurrentRequest(dpy, opcode, gc->xid, tag, draw, read, &reply);
145
146 if (!IndirectAPI)
147 IndirectAPI = __glXNewIndirectAPI();
148 _glapi_set_dispatch(IndirectAPI);
149
150 gc->currentContextTag = reply.contextTag;
151 state = gc->client_state_private;
152 if (state->array_state == NULL) {
153 glGetString(GL_EXTENSIONS);
154 glGetString(GL_VERSION);
155 __glXInitVertexArrayState(gc);
156 }
157
158 return Success;
159 }
160
161 static void
162 indirect_unbind_context(struct glx_context *gc, struct glx_context *new)
163 {
164 Display *dpy = gc->psc->dpy;
165 int opcode = __glXSetupForCommand(dpy);
166 xGLXMakeCurrentReply reply;
167
168 if (gc == new)
169 return;
170
171 /* We are either switching to no context, away from a indirect
172 * context to a direct context or from one dpy to another and have
173 * to send a request to the dpy to unbind the previous context.
174 */
175 if (!new || new->isDirect || new->psc->dpy != dpy) {
176 SendMakeCurrentRequest(dpy, opcode, None,
177 gc->currentContextTag, None, None, &reply);
178 gc->currentContextTag = 0;
179 }
180 }
181
static void
indirect_wait_gl(struct glx_context *gc)
{
   /* Implement glXWaitGL for indirect contexts: flush any buffered GLX
    * render commands, then send a GLXWaitGL request so the server
    * serializes GL rendering ahead of subsequent X requests. */
   xGLXWaitGLReq *req;
   Display *dpy = gc->currentDpy;

   /* Flush any pending commands out */
   __glXFlushRenderBuffer(gc, gc->pc);

   /* Send the glXWaitGL request */
   LockDisplay(dpy);
   GetReq(GLXWaitGL, req);
   req->reqType = gc->majorOpcode;
   req->glxCode = X_GLXWaitGL;
   req->contextTag = gc->currentContextTag;
   UnlockDisplay(dpy);
   SyncHandle();
}
200
static void
indirect_wait_x(struct glx_context *gc)
{
   /* Implement glXWaitX for indirect contexts: flush buffered GLX render
    * commands, then send a GLXWaitX request so the server serializes X
    * rendering ahead of subsequent GL commands. */
   xGLXWaitXReq *req;
   Display *dpy = gc->currentDpy;

   /* Flush any pending commands out */
   __glXFlushRenderBuffer(gc, gc->pc);

   LockDisplay(dpy);
   GetReq(GLXWaitX, req);
   req->reqType = gc->majorOpcode;
   req->glxCode = X_GLXWaitX;
   req->contextTag = gc->currentContextTag;
   UnlockDisplay(dpy);
   SyncHandle();
}
218
static void
indirect_use_x_font(struct glx_context *gc,
                    Font font, int first, int count, int listBase)
{
   /* Implement glXUseXFont for indirect contexts: ask the server to
    * build display lists [listBase, listBase+count) from glyphs
    * [first, first+count) of the given X font.  Pending render commands
    * are flushed first so list creation happens at the right point in
    * the command stream. */
   xGLXUseXFontReq *req;
   Display *dpy = gc->currentDpy;

   /* Flush any pending commands out */
   __glXFlushRenderBuffer(gc, gc->pc);

   /* Send the glXUseFont request */
   LockDisplay(dpy);
   GetReq(GLXUseXFont, req);
   req->reqType = gc->majorOpcode;
   req->glxCode = X_GLXUseXFont;
   req->contextTag = gc->currentContextTag;
   req->font = font;
   req->first = first;
   req->count = count;
   req->listBase = listBase;
   UnlockDisplay(dpy);
   SyncHandle();
}
242
243 static void
244 indirect_bind_tex_image(Display * dpy,
245 GLXDrawable drawable,
246 int buffer, const int *attrib_list)
247 {
248 xGLXVendorPrivateReq *req;
249 struct glx_context *gc = __glXGetCurrentContext();
250 CARD32 *drawable_ptr;
251 INT32 *buffer_ptr;
252 CARD32 *num_attrib_ptr;
253 CARD32 *attrib_ptr;
254 CARD8 opcode;
255 unsigned int i;
256
257 i = 0;
258 if (attrib_list) {
259 while (attrib_list[i * 2] != None)
260 i++;
261 }
262
263 opcode = __glXSetupForCommand(dpy);
264 if (!opcode)
265 return;
266
267 LockDisplay(dpy);
268 GetReqExtra(GLXVendorPrivate, 12 + 8 * i, req);
269 req->reqType = opcode;
270 req->glxCode = X_GLXVendorPrivate;
271 req->vendorCode = X_GLXvop_BindTexImageEXT;
272 req->contextTag = gc->currentContextTag;
273
274 drawable_ptr = (CARD32 *) (req + 1);
275 buffer_ptr = (INT32 *) (drawable_ptr + 1);
276 num_attrib_ptr = (CARD32 *) (buffer_ptr + 1);
277 attrib_ptr = (CARD32 *) (num_attrib_ptr + 1);
278
279 *drawable_ptr = drawable;
280 *buffer_ptr = buffer;
281 *num_attrib_ptr = (CARD32) i;
282
283 i = 0;
284 if (attrib_list) {
285 while (attrib_list[i * 2] != None) {
286 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 0];
287 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 1];
288 i++;
289 }
290 }
291
292 UnlockDisplay(dpy);
293 SyncHandle();
294 }
295
296 static void
297 indirect_release_tex_image(Display * dpy, GLXDrawable drawable, int buffer)
298 {
299 xGLXVendorPrivateReq *req;
300 struct glx_context *gc = __glXGetCurrentContext();
301 CARD32 *drawable_ptr;
302 INT32 *buffer_ptr;
303 CARD8 opcode;
304
305 opcode = __glXSetupForCommand(dpy);
306 if (!opcode)
307 return;
308
309 LockDisplay(dpy);
310 GetReqExtra(GLXVendorPrivate, sizeof(CARD32) + sizeof(INT32), req);
311 req->reqType = opcode;
312 req->glxCode = X_GLXVendorPrivate;
313 req->vendorCode = X_GLXvop_ReleaseTexImageEXT;
314 req->contextTag = gc->currentContextTag;
315
316 drawable_ptr = (CARD32 *) (req + 1);
317 buffer_ptr = (INT32 *) (drawable_ptr + 1);
318
319 *drawable_ptr = drawable;
320 *buffer_ptr = buffer;
321
322 UnlockDisplay(dpy);
323 SyncHandle();
324 }
325
/* Context operations for indirect rendering contexts.  Entries are
 * positional; each slot's role matches the function's name.  The final
 * slot (get_proc_address) is NULL because indirect contexts resolve
 * entry points through the shared dispatch table instead. */
static const struct glx_context_vtable indirect_context_vtable = {
   indirect_destroy_context,
   indirect_bind_context,
   indirect_unbind_context,
   indirect_wait_gl,
   indirect_wait_x,
   indirect_use_x_font,
   indirect_bind_tex_image,
   indirect_release_tex_image,
   NULL, /* get_proc_address */
};
337
338 /**
339 * \todo Eliminate \c __glXInitVertexArrayState. Replace it with a new
340 * function called \c __glXAllocateClientState that allocates the memory and
341 * does all the initialization (including the pixel pack / unpack).
342 */
343 _X_HIDDEN struct glx_context *
344 indirect_create_context(struct glx_screen *psc,
345 struct glx_config *mode,
346 struct glx_context *shareList, int renderType)
347 {
348 struct glx_context *gc;
349 int bufSize;
350 CARD8 opcode;
351 __GLXattribute *state;
352
353 opcode = __glXSetupForCommand(psc->dpy);
354 if (!opcode) {
355 return NULL;
356 }
357
358 /* Allocate our context record */
359 gc = Xmalloc(sizeof *gc);
360 if (!gc) {
361 /* Out of memory */
362 return NULL;
363 }
364 memset(gc, 0, sizeof *gc);
365
366 glx_context_init(gc, psc, mode);
367 gc->isDirect = GL_FALSE;
368 gc->vtable = &indirect_context_vtable;
369 state = Xmalloc(sizeof(struct __GLXattributeRec));
370 if (state == NULL) {
371 /* Out of memory */
372 Xfree(gc);
373 return NULL;
374 }
375 gc->client_state_private = state;
376 memset(gc->client_state_private, 0, sizeof(struct __GLXattributeRec));
377 state->NoDrawArraysProtocol = (getenv("LIBGL_NO_DRAWARRAYS") != NULL);
378
379 /*
380 ** Create a temporary buffer to hold GLX rendering commands. The size
381 ** of the buffer is selected so that the maximum number of GLX rendering
382 ** commands can fit in a single X packet and still have room in the X
383 ** packet for the GLXRenderReq header.
384 */
385
386 bufSize = (XMaxRequestSize(psc->dpy) * 4) - sz_xGLXRenderReq;
387 gc->buf = (GLubyte *) Xmalloc(bufSize);
388 if (!gc->buf) {
389 Xfree(gc->client_state_private);
390 Xfree(gc);
391 return NULL;
392 }
393 gc->bufSize = bufSize;
394
395 /* Fill in the new context */
396 gc->renderMode = GL_RENDER;
397
398 state->storePack.alignment = 4;
399 state->storeUnpack.alignment = 4;
400
401 gc->attributes.stackPointer = &gc->attributes.stack[0];
402
403 /*
404 ** PERFORMANCE NOTE: A mode dependent fill image can speed things up.
405 */
406 gc->fillImage = __glFillImage;
407 gc->pc = gc->buf;
408 gc->bufEnd = gc->buf + bufSize;
409 gc->isDirect = GL_FALSE;
410 if (__glXDebug) {
411 /*
412 ** Set limit register so that there will be one command per packet
413 */
414 gc->limit = gc->buf;
415 }
416 else {
417 gc->limit = gc->buf + bufSize - __GLX_BUFFER_LIMIT_SIZE;
418 }
419 gc->majorOpcode = opcode;
420
421 /*
422 ** Constrain the maximum drawing command size allowed to be
423 ** transfered using the X_GLXRender protocol request. First
424 ** constrain by a software limit, then constrain by the protocl
425 ** limit.
426 */
427 if (bufSize > __GLX_RENDER_CMD_SIZE_LIMIT) {
428 bufSize = __GLX_RENDER_CMD_SIZE_LIMIT;
429 }
430 if (bufSize > __GLX_MAX_RENDER_CMD_SIZE) {
431 bufSize = __GLX_MAX_RENDER_CMD_SIZE;
432 }
433 gc->maxSmallRenderCommandSize = bufSize;
434
435
436 return gc;
437 }
438
439 static struct glx_context *
440 indirect_create_context_attribs(struct glx_screen *base,
441 struct glx_config *config_base,
442 struct glx_context *shareList,
443 unsigned num_attribs,
444 const uint32_t *attribs,
445 unsigned *error)
446 {
447 /* All of the attribute validation for indirect contexts is handled on the
448 * server, so there's not much to do here.
449 */
450 (void) num_attribs;
451 (void) attribs;
452
453 /* The error parameter is only used on the server so that correct GLX
454 * protocol errors can be generated. On the client, it can be ignored.
455 */
456 (void) error;
457
458 return indirect_create_context(base, config_base, shareList, 0);
459 }
460
/* Screen operations for indirect rendering; entries are positional
 * (create_context, create_context_attribs).  NOTE(review): this symbol
 * has external linkage — presumably referenced from another translation
 * unit, so it must not be made static; confirm before changing. */
struct glx_screen_vtable indirect_screen_vtable = {
   indirect_create_context,
   indirect_create_context_attribs
};
465
466 _X_HIDDEN struct glx_screen *
467 indirect_create_screen(int screen, struct glx_display * priv)
468 {
469 struct glx_screen *psc;
470
471 psc = Xmalloc(sizeof *psc);
472 if (psc == NULL)
473 return NULL;
474
475 memset(psc, 0, sizeof *psc);
476 glx_screen_init(psc, screen, priv);
477 psc->vtable = &indirect_screen_vtable;
478
479 return psc;
480 }